changeset:   44129:84a0102c05c7
branch:      stable
tag:         5.3rc0
author:      Augie Fackler <augie@google.com>
date:        Tue, 21 Jan 2020 13:14:51 -0500
parents:     44048:61881b170140, 44128:ff396501e841
children:    44130:266c42c60183
description: merge to stable for 5.3 release freeze
files:       hgext/fix.py, hgext/largefiles/overrides.py, tests/test-fix.t
--- a/.arcconfig	Thu Jan 09 14:19:20 2020 -0500
+++ b/.arcconfig	Tue Jan 21 13:14:51 2020 -0500
@@ -1,5 +1,6 @@
 {
     "conduit_uri": "https://phab.mercurial-scm.org/api",
+    "phabricator.uri": "https://phab.mercurial-scm.org/",
     "repository.callsign": "HG",
     "arc.land.onto.default": "@",
     "base": "hg:.^"
--- a/.hgignore	Thu Jan 09 14:19:20 2020 -0500
+++ b/.hgignore	Tue Jan 21 13:14:51 2020 -0500
@@ -51,6 +51,7 @@
 cscope.*
 .idea/*
 .asv/*
+.pytype/*
 i18n/hg.pot
 locale/*/LC_MESSAGES/hg.mo
 hgext/__index__.py
--- a/Makefile	Thu Jan 09 14:19:20 2020 -0500
+++ b/Makefile	Tue Jan 21 13:14:51 2020 -0500
@@ -11,7 +11,7 @@
 PURE=
 PYFILESCMD=find mercurial hgext doc -name '*.py'
 PYFILES:=$(shell $(PYFILESCMD))
-DOCFILES=mercurial/help/*.txt
+DOCFILES=mercurial/helptext/*.txt
 export LANGUAGE=C
 export LC_ALL=C
 TESTFLAGS ?= $(shell echo $$HGTESTFLAGS)
@@ -189,7 +189,8 @@
   docker-centos6 \
   docker-centos7 \
   docker-centos8 \
-  docker-debian-jessie \
+  docker-debian-bullseye \
+  docker-debian-buster \
   docker-debian-stretch \
   docker-fedora \
   docker-ubuntu-trusty \
--- a/contrib/automation/hgautomation/aws.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/automation/hgautomation/aws.py	Tue Jan 21 13:14:51 2020 -0500
@@ -59,7 +59,7 @@
 UBUNTU_ACCOUNT_ID = '099720109477'
 
 
-WINDOWS_BASE_IMAGE_NAME = 'Windows_Server-2019-English-Full-Base-2019.07.12'
+WINDOWS_BASE_IMAGE_NAME = 'Windows_Server-2019-English-Full-Base-2019.11.13'
 
 
 KEY_PAIRS = {
--- a/contrib/automation/hgautomation/windows.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/automation/hgautomation/windows.py	Tue Jan 21 13:14:51 2020 -0500
@@ -71,7 +71,7 @@
 BUILD_INNO = r'''
 Set-Location C:\hgdev\src
 $python = "C:\hgdev\python27-{arch}\python.exe"
-C:\hgdev\python37-x64\python.exe contrib\packaging\inno\build.py --python $python
+C:\hgdev\python37-x64\python.exe contrib\packaging\packaging.py inno --python $python
 if ($LASTEXITCODE -ne 0) {{
     throw "process exited non-0: $LASTEXITCODE"
 }}
@@ -88,7 +88,7 @@
 BUILD_WIX = r'''
 Set-Location C:\hgdev\src
 $python = "C:\hgdev\python27-{arch}\python.exe"
-C:\hgdev\python37-x64\python.exe contrib\packaging\wix\build.py --python $python {extra_args}
+C:\hgdev\python37-x64\python.exe contrib\packaging\packaging.py wix --python $python {extra_args}
 if ($LASTEXITCODE -ne 0) {{
     throw "process exited non-0: $LASTEXITCODE"
 }}
--- a/contrib/check-code.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/check-code.py	Tue Jan 21 13:14:51 2020 -0500
@@ -281,10 +281,10 @@
     for tp in testpats[i]:
         p = tp[0]
         m = tp[1]
-        if p.startswith(r'^'):
-            p = r"^  [$>] (%s)" % p[1:]
+        if p.startswith('^'):
+            p = "^  [$>] (%s)" % p[1:]
         else:
-            p = r"^  [$>] .*(%s)" % p
+            p = "^  [$>] .*(%s)" % p
         utestpats[i].append((p, m) + tp[2:])
 
 # don't transform the following rules:
--- a/contrib/check-commit	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/check-commit	Tue Jan 21 13:14:51 2020 -0500
@@ -27,32 +27,42 @@
 
 errors = [
     (beforepatch + r".*[(]bc[)]", "(BC) needs to be uppercase"),
-    (beforepatch + r".*[(]issue \d\d\d",
-     "no space allowed between issue and number"),
+    (
+        beforepatch + r".*[(]issue \d\d\d",
+        "no space allowed between issue and number",
+    ),
     (beforepatch + r".*[(]bug(\d|\s)", "use (issueDDDD) instead of bug"),
     (commitheader + r"# User [^@\n]+\n", "username is not an email address"),
-    (commitheader + r"(?!merge with )[^#]\S+[^:] ",
-     "summary line doesn't start with 'topic: '"),
+    (
+        commitheader + r"(?!merge with )[^#]\S+[^:] ",
+        "summary line doesn't start with 'topic: '",
+    ),
     (afterheader + r"[A-Z][a-z]\S+", "don't capitalize summary lines"),
     (afterheader + r"^\S+: *[A-Z][a-z]\S+", "don't capitalize summary lines"),
-    (afterheader + r"\S*[^A-Za-z0-9-_]\S*: ",
-     "summary keyword should be most user-relevant one-word command or topic"),
+    (
+        afterheader + r"\S*[^A-Za-z0-9-_]\S*: ",
+        "summary keyword should be most user-relevant one-word command or topic",
+    ),
     (afterheader + r".*\.\s*\n", "don't add trailing period on summary line"),
     (afterheader + r".{79,}", "summary line too long (limit is 78)"),
 ]
 
 word = re.compile(r'\S')
+
+
 def nonempty(first, second):
     if word.search(first):
         return first
     return second
 
+
 def checkcommit(commit, node=None):
     exitcode = 0
     printed = node is None
     hits = []
-    signtag = (afterheader +
-          r'Added (tag [^ ]+|signature) for changeset [a-f0-9]{12}')
+    signtag = (
+        afterheader + r'Added (tag [^ ]+|signature) for changeset [a-f0-9]{12}'
+    )
     if re.search(signtag, commit):
         return 0
     for exp, msg in errors:
@@ -84,9 +94,11 @@
 
     return exitcode
 
+
 def readcommit(node):
     return os.popen("hg export %s" % node).read()
 
+
 if __name__ == "__main__":
     exitcode = 0
     node = os.environ.get("HG_NODE")
--- a/contrib/clang-format-ignorelist	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/clang-format-ignorelist	Tue Jan 21 13:14:51 2020 -0500
@@ -3,100 +3,9 @@
 mercurial/cext/manifest.c
 mercurial/cext/osutil.c
 # Vendored code that we should never format:
-contrib/python-zstandard/c-ext/bufferutil.c
-contrib/python-zstandard/c-ext/compressionchunker.c
-contrib/python-zstandard/c-ext/compressiondict.c
-contrib/python-zstandard/c-ext/compressionparams.c
-contrib/python-zstandard/c-ext/compressionreader.c
-contrib/python-zstandard/c-ext/compressionwriter.c
-contrib/python-zstandard/c-ext/compressobj.c
-contrib/python-zstandard/c-ext/compressor.c
-contrib/python-zstandard/c-ext/compressoriterator.c
-contrib/python-zstandard/c-ext/constants.c
-contrib/python-zstandard/c-ext/decompressionreader.c
-contrib/python-zstandard/c-ext/decompressionwriter.c
-contrib/python-zstandard/c-ext/decompressobj.c
-contrib/python-zstandard/c-ext/decompressor.c
-contrib/python-zstandard/c-ext/decompressoriterator.c
-contrib/python-zstandard/c-ext/frameparams.c
-contrib/python-zstandard/c-ext/python-zstandard.h
-contrib/python-zstandard/zstd.c
-contrib/python-zstandard/zstd/common/bitstream.h
-contrib/python-zstandard/zstd/common/compiler.h
-contrib/python-zstandard/zstd/common/cpu.h
-contrib/python-zstandard/zstd/common/debug.c
-contrib/python-zstandard/zstd/common/debug.h
-contrib/python-zstandard/zstd/common/entropy_common.c
-contrib/python-zstandard/zstd/common/error_private.c
-contrib/python-zstandard/zstd/common/error_private.h
-contrib/python-zstandard/zstd/common/fse_decompress.c
-contrib/python-zstandard/zstd/common/fse.h
-contrib/python-zstandard/zstd/common/huf.h
-contrib/python-zstandard/zstd/common/mem.h
-contrib/python-zstandard/zstd/common/pool.c
-contrib/python-zstandard/zstd/common/pool.h
-contrib/python-zstandard/zstd/common/threading.c
-contrib/python-zstandard/zstd/common/threading.h
-contrib/python-zstandard/zstd/common/xxhash.c
-contrib/python-zstandard/zstd/common/xxhash.h
-contrib/python-zstandard/zstd/common/zstd_common.c
-contrib/python-zstandard/zstd/common/zstd_errors.h
-contrib/python-zstandard/zstd/common/zstd_internal.h
-contrib/python-zstandard/zstd/compress/fse_compress.c
-contrib/python-zstandard/zstd/compress/hist.c
-contrib/python-zstandard/zstd/compress/hist.h
-contrib/python-zstandard/zstd/compress/huf_compress.c
-contrib/python-zstandard/zstd/compress/zstd_compress.c
-contrib/python-zstandard/zstd/compress/zstd_compress_internal.h
-contrib/python-zstandard/zstd/compress/zstd_compress_literals.c
-contrib/python-zstandard/zstd/compress/zstd_compress_literals.h
-contrib/python-zstandard/zstd/compress/zstd_compress_sequences.c
-contrib/python-zstandard/zstd/compress/zstd_compress_sequences.h
-contrib/python-zstandard/zstd/compress/zstd_double_fast.c
-contrib/python-zstandard/zstd/compress/zstd_double_fast.h
-contrib/python-zstandard/zstd/compress/zstd_fast.c
-contrib/python-zstandard/zstd/compress/zstd_fast.h
-contrib/python-zstandard/zstd/compress/zstd_lazy.c
-contrib/python-zstandard/zstd/compress/zstd_lazy.h
-contrib/python-zstandard/zstd/compress/zstd_ldm.c
-contrib/python-zstandard/zstd/compress/zstd_ldm.h
-contrib/python-zstandard/zstd/compress/zstdmt_compress.c
-contrib/python-zstandard/zstd/compress/zstdmt_compress.h
-contrib/python-zstandard/zstd/compress/zstd_opt.c
-contrib/python-zstandard/zstd/compress/zstd_opt.h
-contrib/python-zstandard/zstd/decompress/huf_decompress.c
-contrib/python-zstandard/zstd/decompress/zstd_ddict.c
-contrib/python-zstandard/zstd/decompress/zstd_ddict.h
-contrib/python-zstandard/zstd/decompress/zstd_decompress_block.c
-contrib/python-zstandard/zstd/decompress/zstd_decompress_block.h
-contrib/python-zstandard/zstd/decompress/zstd_decompress_internal.h
-contrib/python-zstandard/zstd/decompress/zstd_decompress.c
-contrib/python-zstandard/zstd/deprecated/zbuff_common.c
-contrib/python-zstandard/zstd/deprecated/zbuff_compress.c
-contrib/python-zstandard/zstd/deprecated/zbuff_decompress.c
-contrib/python-zstandard/zstd/deprecated/zbuff.h
-contrib/python-zstandard/zstd/dictBuilder/cover.c
-contrib/python-zstandard/zstd/dictBuilder/cover.h
-contrib/python-zstandard/zstd/dictBuilder/divsufsort.c
-contrib/python-zstandard/zstd/dictBuilder/divsufsort.h
-contrib/python-zstandard/zstd/dictBuilder/fastcover.c
-contrib/python-zstandard/zstd/dictBuilder/zdict.c
-contrib/python-zstandard/zstd/dictBuilder/zdict.h
-contrib/python-zstandard/zstd/zstd.h
-hgext/fsmonitor/pywatchman/bser.c
-mercurial/thirdparty/xdiff/xdiff.h
-mercurial/thirdparty/xdiff/xdiffi.c
-mercurial/thirdparty/xdiff/xdiffi.h
-mercurial/thirdparty/xdiff/xemit.c
-mercurial/thirdparty/xdiff/xemit.h
-mercurial/thirdparty/xdiff/xhistogram.c
-mercurial/thirdparty/xdiff/xinclude.h
-mercurial/thirdparty/xdiff/xmacros.h
-mercurial/thirdparty/xdiff/xmerge.c
-mercurial/thirdparty/xdiff/xpatience.c
-mercurial/thirdparty/xdiff/xprepare.c
-mercurial/thirdparty/xdiff/xprepare.h
-mercurial/thirdparty/xdiff/xtypes.h
-mercurial/thirdparty/xdiff/xutils.c
-mercurial/thirdparty/xdiff/xutils.h
-mercurial/thirdparty/zope/interface/_zope_interface_coptimizations.c
+syntax: glob
+contrib/python-zstandard/**.c
+contrib/python-zstandard/**.h
+hgext/fsmonitor/pywatchman/**.c
+mercurial/thirdparty/**.c
+mercurial/thirdparty/**.h
--- a/contrib/dumprevlog	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/dumprevlog	Tue Jan 21 13:14:51 2020 -0500
@@ -11,23 +11,26 @@
     pycompat,
     revlog,
 )
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 for fp in (sys.stdin, sys.stdout, sys.stderr):
     procutil.setbinary(fp)
 
+
 def binopen(path, mode=b'rb'):
     if b'b' not in mode:
         mode = mode + b'b'
     return open(path, pycompat.sysstr(mode))
+
+
 binopen.options = {}
 
+
 def printb(data, end=b'\n'):
     sys.stdout.flush()
     pycompat.stdout.write(data + end)
 
+
 for f in sys.argv[1:]:
     r = revlog.revlog(binopen, encoding.strtolocal(f))
     print("file:", f)
--- a/contrib/examples/fix.hgrc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/examples/fix.hgrc	Tue Jan 21 13:14:51 2020 -0500
@@ -1,9 +1,14 @@
 [fix]
-clang-format:command = clang-format --style file -i
-clang-format:pattern = (**.c or **.cc or **.h) and not "listfile:contrib/clang-format-ignorelist"
+clang-format:command = clang-format --style file
+clang-format:pattern = set:(**.c or **.cc or **.h) and not "include:contrib/clang-format-ignorelist"
 
-rustfmt:command = rustfmt {rootpath}
+rustfmt:command = rustfmt +nightly
 rustfmt:pattern = set:**.rs
 
 black:command = black --config=black.toml -
 black:pattern = set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"
+
+# Mercurial doesn't have any Go code, but if we did this is how we
+# would configure `hg fix` for Go:
+go:command = gofmt
+go:pattern = set:**.go
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/FuzzedDataProvider.h	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,368 @@
+//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// A single header library providing a utility class to break up an array of
+// bytes. Whenever run on the same input, provides the same output, as long as
+// its methods are called in the same order, with the same arguments.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+
+#include <algorithm>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <initializer_list>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+// In addition to the comments below, the API is also briefly documented at
+// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider
+class FuzzedDataProvider
+{
+      public:
+	// |data| is an array of length |size| that the FuzzedDataProvider wraps
+	// to provide more granular access. |data| must outlive the
+	// FuzzedDataProvider.
+	FuzzedDataProvider(const uint8_t *data, size_t size)
+	    : data_ptr_(data), remaining_bytes_(size)
+	{
+	}
+	~FuzzedDataProvider() = default;
+
+	// Returns a std::vector containing |num_bytes| of input data. If fewer
+	// than |num_bytes| of data remain, returns a shorter std::vector
+	// containing all of the data that's left. Can be used with any byte
+	// sized type, such as char, unsigned char, uint8_t, etc.
+	template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes)
+	{
+		num_bytes = std::min(num_bytes, remaining_bytes_);
+		return ConsumeBytes<T>(num_bytes, num_bytes);
+	}
+
+	// Similar to |ConsumeBytes|, but also appends the terminator value at
+	// the end of the resulting vector. Useful when a mutable
+	// null-terminated C-string is needed, for example. But that is a rare
+	// case; better to avoid it if possible and prefer the |ConsumeBytes|
+	// or |ConsumeBytesAsString| methods.
+	template <typename T>
+	std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes,
+	                                          T terminator = 0)
+	{
+		num_bytes = std::min(num_bytes, remaining_bytes_);
+		std::vector<T> result =
+		    ConsumeBytes<T>(num_bytes + 1, num_bytes);
+		result.back() = terminator;
+		return result;
+	}
+
+	// Returns a std::string containing |num_bytes| of input data. Using
+	// this and
+	// |.c_str()| on the resulting string is the best way to get an
+	// immutable null-terminated C string. If fewer than |num_bytes| of data
+	// remain, returns a shorter std::string containing all of the data
+	// that's left.
+	std::string ConsumeBytesAsString(size_t num_bytes)
+	{
+		static_assert(sizeof(std::string::value_type) ==
+		                  sizeof(uint8_t),
+		              "ConsumeBytesAsString cannot convert the data to "
+		              "a string.");
+
+		num_bytes = std::min(num_bytes, remaining_bytes_);
+		std::string result(
+		    reinterpret_cast<const std::string::value_type *>(
+		        data_ptr_),
+		    num_bytes);
+		Advance(num_bytes);
+		return result;
+	}
+
+	// Returns a number in the range [min, max] by consuming bytes from the
+	// input data. The value might not be uniformly distributed in the given
+	// range. If there's no input data left, always returns |min|. |min|
+	// must be less than or equal to |max|.
+	template <typename T> T ConsumeIntegralInRange(T min, T max)
+	{
+		static_assert(std::is_integral<T>::value,
+		              "An integral type is required.");
+		static_assert(sizeof(T) <= sizeof(uint64_t),
+		              "Unsupported integral type.");
+
+		if (min > max)
+			abort();
+
+		// Use the biggest type possible to hold the range and the
+		// result.
+		uint64_t range = static_cast<uint64_t>(max) - min;
+		uint64_t result = 0;
+		size_t offset = 0;
+
+		while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
+		       remaining_bytes_ != 0) {
+			// Pull bytes off the end of the seed data.
+			// Experimentally, this seems to allow the fuzzer to
+			// more easily explore the input space. This makes
+			// sense, since it works by modifying inputs that caused
+			// new code to run, and this data is often used to
+			// encode length of data read by |ConsumeBytes|.
+			// Separating out read lengths makes it easier to modify
+			// the contents of the data that is actually read.
+			--remaining_bytes_;
+			result =
+			    (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
+			offset += CHAR_BIT;
+		}
+
+		// Avoid division by 0, in case |range + 1| results in overflow.
+		if (range != std::numeric_limits<decltype(range)>::max())
+			result = result % (range + 1);
+
+		return static_cast<T>(min + result);
+	}
+
+	// Returns a std::string of length from 0 to |max_length|. When it runs
+	// out of input data, returns what remains of the input. Designed to be
+	// more stable with respect to a fuzzer inserting characters than just
+	// picking a random length and then consuming that many bytes with
+	// |ConsumeBytes|.
+	std::string ConsumeRandomLengthString(size_t max_length)
+	{
+		// Reads bytes from the start of |data_ptr_|. Maps "\\" to "\",
+		// and maps "\" followed by anything else to the end of the
+		// string. As a result of this logic, a fuzzer can insert
+		// characters into the string, and the string will be lengthened
+		// to include those new characters, resulting in a more stable
+		// fuzzer than picking the length of a string independently from
+		// picking its contents.
+		std::string result;
+
+		// Reserve the anticipated capacity to prevent several
+		// reallocations.
+		result.reserve(std::min(max_length, remaining_bytes_));
+		for (size_t i = 0; i < max_length && remaining_bytes_ != 0;
+		     ++i) {
+			char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+			Advance(1);
+			if (next == '\\' && remaining_bytes_ != 0) {
+				next =
+				    ConvertUnsignedToSigned<char>(data_ptr_[0]);
+				Advance(1);
+				if (next != '\\')
+					break;
+			}
+			result += next;
+		}
+
+		result.shrink_to_fit();
+		return result;
+	}
+
+	// Returns a std::vector containing all remaining bytes of the input
+	// data.
+	template <typename T> std::vector<T> ConsumeRemainingBytes()
+	{
+		return ConsumeBytes<T>(remaining_bytes_);
+	}
+
+	// Returns a std::string containing all remaining bytes of the input
+	// data. Prefer using |ConsumeRemainingBytes| unless you actually need a
+	// std::string object.
+	std::string ConsumeRemainingBytesAsString()
+	{
+		return ConsumeBytesAsString(remaining_bytes_);
+	}
+
+	// Returns a number in the range [Type's min, Type's max]. The value
+	// might not be uniformly distributed in the given range. If there's no
+	// input data left, always returns |min|.
+	template <typename T> T ConsumeIntegral()
+	{
+		return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
+		                              std::numeric_limits<T>::max());
+	}
+
+	// Reads one byte and returns a bool, or false when no data remains.
+	bool ConsumeBool()
+	{
+		return 1 & ConsumeIntegral<uint8_t>();
+	}
+
+	// Returns a copy of the value selected from the given fixed-size
+	// |array|.
+	template <typename T, size_t size>
+	T PickValueInArray(const T (&array)[size])
+	{
+		static_assert(size > 0, "The array must be non empty.");
+		return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
+	}
+
+	template <typename T>
+	T PickValueInArray(std::initializer_list<const T> list)
+	{
+		// TODO(Dor1s): switch to static_assert once C++14 is allowed.
+		if (!list.size())
+			abort();
+
+		return *(list.begin() +
+		         ConsumeIntegralInRange<size_t>(0, list.size() - 1));
+	}
+
+	// Returns an enum value. The enum must start at 0 and be contiguous. It
+	// must also contain |kMaxValue| aliased to its largest (inclusive)
+	// value. Such as: enum class Foo { SomeValue, OtherValue, kMaxValue =
+	// OtherValue };
+	template <typename T> T ConsumeEnum()
+	{
+		static_assert(std::is_enum<T>::value,
+		              "|T| must be an enum type.");
+		return static_cast<T>(ConsumeIntegralInRange<uint32_t>(
+		    0, static_cast<uint32_t>(T::kMaxValue)));
+	}
+
+	// Returns a floating point number in the range [0.0, 1.0]. If there's
+	// no input data left, always returns 0.
+	template <typename T> T ConsumeProbability()
+	{
+		static_assert(std::is_floating_point<T>::value,
+		              "A floating point type is required.");
+
+		// Use different integral types for different floating point
+		// types in order to provide better density of the resulting
+		// values.
+		using IntegralType =
+		    typename std::conditional<(sizeof(T) <= sizeof(uint32_t)),
+		                              uint32_t, uint64_t>::type;
+
+		T result = static_cast<T>(ConsumeIntegral<IntegralType>());
+		result /=
+		    static_cast<T>(std::numeric_limits<IntegralType>::max());
+		return result;
+	}
+
+	// Returns a floating point value in the range [Type's lowest, Type's
+	// max] by consuming bytes from the input data. If there's no input data
+	// left, always returns approximately 0.
+	template <typename T> T ConsumeFloatingPoint()
+	{
+		return ConsumeFloatingPointInRange<T>(
+		    std::numeric_limits<T>::lowest(),
+		    std::numeric_limits<T>::max());
+	}
+
+	// Returns a floating point value in the given range by consuming bytes
+	// from the input data. If there's no input data left, returns |min|.
+	// Note that |min| must be less than or equal to |max|.
+	template <typename T> T ConsumeFloatingPointInRange(T min, T max)
+	{
+		if (min > max)
+			abort();
+
+		T range = .0;
+		T result = min;
+		constexpr T zero(.0);
+		if (max > zero && min < zero &&
+		    max > min + std::numeric_limits<T>::max()) {
+			// The diff |max - min| would overflow the given
+			// floating point type. Use the half of the diff as the
+			// range and consume a bool to decide whether the result
+			// is in the first or the second part of the diff.
+			range = (max / 2.0) - (min / 2.0);
+			if (ConsumeBool()) {
+				result += range;
+			}
+		} else {
+			range = max - min;
+		}
+
+		return result + range * ConsumeProbability<T>();
+	}
+
+	// Reports the remaining bytes available for fuzzed input.
+	size_t remaining_bytes()
+	{
+		return remaining_bytes_;
+	}
+
+      private:
+	FuzzedDataProvider(const FuzzedDataProvider &) = delete;
+	FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
+
+	void Advance(size_t num_bytes)
+	{
+		if (num_bytes > remaining_bytes_)
+			abort();
+
+		data_ptr_ += num_bytes;
+		remaining_bytes_ -= num_bytes;
+	}
+
+	template <typename T>
+	std::vector<T> ConsumeBytes(size_t size, size_t num_bytes_to_consume)
+	{
+		static_assert(sizeof(T) == sizeof(uint8_t),
+		              "Incompatible data type.");
+
+		// The point of using the size-based constructor below is to
+		// increase the odds of having a vector object with capacity
+		// being equal to the length. That part is always implementation
+		// specific, but at least both libc++ and libstdc++ allocate the
+		// requested number of bytes in that constructor, which seems to
+		// be a natural choice for other implementations as well. To
+		// increase the odds even more, we also call |shrink_to_fit|
+		// below.
+		std::vector<T> result(size);
+		if (size == 0) {
+			if (num_bytes_to_consume != 0)
+				abort();
+			return result;
+		}
+
+		std::memcpy(result.data(), data_ptr_, num_bytes_to_consume);
+		Advance(num_bytes_to_consume);
+
+		// Even though |shrink_to_fit| is also implementation specific,
+		// we expect it to provide an additional assurance in case
+		// vector's constructor allocated a buffer which is larger than
+		// the actual amount of data we put inside it.
+		result.shrink_to_fit();
+		return result;
+	}
+
+	template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value)
+	{
+		static_assert(sizeof(TS) == sizeof(TU),
+		              "Incompatible data types.");
+		static_assert(!std::numeric_limits<TU>::is_signed,
+		              "Source type must be unsigned.");
+
+		// TODO(Dor1s): change to `if constexpr` once C++17 becomes
+		// mainstream.
+		if (std::numeric_limits<TS>::is_modulo)
+			return static_cast<TS>(value);
+
+		// Avoid using implementation-defined unsigned to signed
+		// conversions. To learn more, see
+		// https://stackoverflow.com/questions/13150449.
+		if (value <= std::numeric_limits<TS>::max()) {
+			return static_cast<TS>(value);
+		} else {
+			constexpr auto TS_min = std::numeric_limits<TS>::min();
+			return TS_min + static_cast<char>(value - TS_min);
+		}
+	}
+
+	const uint8_t *data_ptr_;
+	size_t remaining_bytes_;
+};
+
+#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+// no-check-code since this is from a third party
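
The header above replaces the hand-rolled SplitInputs helper (removed from fuzzutil.cc further down in this merge). As a rough illustration of the API it documents, a fuzz target built on it typically looks like the sketch below; this target is hypothetical and not part of the changeset:

// Illustrative only: a hypothetical fuzz target using FuzzedDataProvider;
// not part of this changeset.
#include <cstdint>
#include <string>

#include "FuzzedDataProvider.h"

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
{
	FuzzedDataProvider provider(Data, Size);
	// Integral reads are pulled off the *end* of the input, so length
	// fields and payload bytes stay independent under mutation.
	size_t limit = provider.ConsumeIntegralInRange<size_t>(0, 1024);
	bool flag = provider.ConsumeBool();
	// Whatever remains becomes the payload for the code under test.
	std::string payload = provider.ConsumeRemainingBytesAsString();
	(void)limit;
	(void)flag;
	(void)payload; // exercise the code under test here
	return 0;
}

The bdiff fuzzer below adopts exactly this pattern, splitting its input into a left and a right buffer.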
--- a/contrib/fuzz/Makefile	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/Makefile	Tue Jan 21 13:14:51 2020 -0500
@@ -1,184 +1,129 @@
 CC = clang
 CXX = clang++
 
-all: bdiff mpatch xdiff
+# By default, use our own standalone_fuzz_target_runner.
+# This runner does no fuzzing, but simply executes the inputs
+# provided via parameters.
+# Run e.g. "make all LIB_FUZZING_ENGINE=/path/to/libFuzzer.a"
+# to link the fuzzer(s) against a real fuzzing engine.
+#
+# OSS-Fuzz will define its own value for LIB_FUZZING_ENGINE.
+LIB_FUZZING_ENGINE ?= standalone_fuzz_target_runner.o
 
-fuzzutil.o: fuzzutil.cc fuzzutil.h
-	$(CXX) $(CXXFLAGS) -g -O1 \
-	  -std=c++17 \
-	  -I../../mercurial -c -o fuzzutil.o fuzzutil.cc
+PYTHON_CONFIG ?= $$OUT/sanpy/bin/python-config
+
+CXXFLAGS += -Wno-deprecated-register
 
-fuzzutil-oss-fuzz.o: fuzzutil.cc fuzzutil.h
-	$(CXX) $(CXXFLAGS) -std=c++17 \
-	  -I../../mercurial -c -o fuzzutil-oss-fuzz.o fuzzutil.cc
+all: standalone_fuzz_target_runner.o oss-fuzz
+
+standalone_fuzz_target_runner.o: standalone_fuzz_target_runner.cc
+
+$$OUT/%_fuzzer_seed_corpus.zip: %_corpus.py
+	python $< $@
 
 pyutil.o: pyutil.cc pyutil.h
 	$(CXX) $(CXXFLAGS) -g -O1 \
-	  `$$OUT/sanpy/bin/python-config --cflags` \
+	  `$(PYTHON_CONFIG) --cflags` \
 	  -I../../mercurial -c -o pyutil.o pyutil.cc
 
-bdiff.o: ../../mercurial/bdiff.c
-	$(CC) $(CFLAGS) -fsanitize=fuzzer-no-link,address -c -o bdiff.o \
-	  ../../mercurial/bdiff.c
-
-bdiff: bdiff.cc bdiff.o fuzzutil.o
-	$(CXX) $(CXXFLAGS) -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \
-	  -std=c++17 \
-	  -I../../mercurial bdiff.cc bdiff.o fuzzutil.o -o bdiff
-
 bdiff-oss-fuzz.o: ../../mercurial/bdiff.c
 	$(CC) $(CFLAGS) -c -o bdiff-oss-fuzz.o ../../mercurial/bdiff.c
 
-bdiff_fuzzer: bdiff.cc bdiff-oss-fuzz.o fuzzutil-oss-fuzz.o
+bdiff_fuzzer: bdiff.cc bdiff-oss-fuzz.o
 	$(CXX) $(CXXFLAGS) -std=c++17 -I../../mercurial bdiff.cc \
-	  bdiff-oss-fuzz.o fuzzutil-oss-fuzz.o -lFuzzingEngine -o \
+	  bdiff-oss-fuzz.o $(LIB_FUZZING_ENGINE) -o \
 	  $$OUT/bdiff_fuzzer
 
 mpatch.o: ../../mercurial/mpatch.c
 	$(CC) -g -O1 -fsanitize=fuzzer-no-link,address -c -o mpatch.o \
 	  ../../mercurial/mpatch.c
 
-mpatch: CXXFLAGS += -std=c++17
-mpatch: mpatch.cc mpatch.o fuzzutil.o
-	$(CXX) $(CXXFLAGS) -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \
-	  -I../../mercurial mpatch.cc mpatch.o fuzzutil.o -o mpatch
-
 mpatch-oss-fuzz.o: ../../mercurial/mpatch.c
 	$(CC) $(CFLAGS) -c -o mpatch-oss-fuzz.o ../../mercurial/mpatch.c
 
-mpatch_fuzzer: mpatch.cc mpatch-oss-fuzz.o fuzzutil-oss-fuzz.o
+mpatch_fuzzer: mpatch.cc mpatch-oss-fuzz.o $$OUT/mpatch_fuzzer_seed_corpus.zip
 	$(CXX) $(CXXFLAGS) -std=c++17 -I../../mercurial mpatch.cc \
-	  mpatch-oss-fuzz.o fuzzutil-oss-fuzz.o -lFuzzingEngine -o \
+	  mpatch-oss-fuzz.o $(LIB_FUZZING_ENGINE) -o \
 	  $$OUT/mpatch_fuzzer
 
-mpatch_corpus.zip:
-	python mpatch_corpus.py $$OUT/mpatch_fuzzer_seed_corpus.zip
-
-x%.o: ../../mercurial/thirdparty/xdiff/x%.c ../../mercurial/thirdparty/xdiff/*.h
-	$(CC) -g -O1 -fsanitize=fuzzer-no-link,address -c \
-	  -o $@ \
-	  $<
-
-xdiff: CXXFLAGS += -std=c++17
-xdiff: xdiff.cc xdiffi.o xprepare.o xutils.o fuzzutil.o
-	$(CXX) $(CXXFLAGS) -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \
-	  -I../../mercurial xdiff.cc \
-	  xdiffi.o xprepare.o xutils.o fuzzutil.o -o xdiff
-
 fuzz-x%.o: ../../mercurial/thirdparty/xdiff/x%.c ../../mercurial/thirdparty/xdiff/*.h
 	$(CC) $(CFLAGS) -c \
 	  -o $@ \
 	  $<
 
-xdiff_fuzzer: xdiff.cc fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o
+xdiff_fuzzer: xdiff.cc fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o 
 	$(CXX) $(CXXFLAGS) -std=c++17 -I../../mercurial xdiff.cc \
-	  fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o \
-	  -lFuzzingEngine -o $$OUT/xdiff_fuzzer
-
-manifest.o: ../../mercurial/cext/manifest.c
-	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-	  -I../../mercurial \
-	  -c -o manifest.o ../../mercurial/cext/manifest.c
-
-charencode.o: ../../mercurial/cext/charencode.c
-	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-	  -I../../mercurial \
-	  -c -o charencode.o ../../mercurial/cext/charencode.c
+	  fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o \
+	  $(LIB_FUZZING_ENGINE) -o $$OUT/xdiff_fuzzer
 
-parsers.o: ../../mercurial/cext/parsers.c
-	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-	  -I../../mercurial \
-	  -c -o parsers.o ../../mercurial/cext/parsers.c
-
-dirs.o: ../../mercurial/cext/dirs.c
-	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-	  -I../../mercurial \
-	  -c -o dirs.o ../../mercurial/cext/dirs.c
+parsers-%.o: ../../mercurial/cext/%.c
+	$(CC) -I../../mercurial `$(PYTHON_CONFIG) --cflags` $(CFLAGS) -c \
+	  -o $@ $<
 
-pathencode.o: ../../mercurial/cext/pathencode.c
-	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-	  -I../../mercurial \
-	  -c -o pathencode.o ../../mercurial/cext/pathencode.c
+PARSERS_OBJS=parsers-manifest.o parsers-charencode.o parsers-parsers.o parsers-dirs.o parsers-pathencode.o parsers-revlog.o
 
-revlog.o: ../../mercurial/cext/revlog.c
-	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-	  -I../../mercurial \
-	  -c -o revlog.o ../../mercurial/cext/revlog.c
-
-dirs_fuzzer: dirs.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
-	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+dirs_fuzzer: dirs.cc pyutil.o $(PARSERS_OBJS) $$OUT/dirs_fuzzer_seed_corpus.zip
+	$(CXX) $(CXXFLAGS) `$(PYTHON_CONFIG) --cflags` \
 	  -Wno-register -Wno-macro-redefined \
 	  -I../../mercurial dirs.cc \
-	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
-	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  pyutil.o $(PARSERS_OBJS) \
+	  $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
 	  -o $$OUT/dirs_fuzzer
 
-fncache_fuzzer: fncache.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
-	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+fncache_fuzzer: fncache.cc pyutil.o $(PARSERS_OBJS)
+	$(CXX) $(CXXFLAGS) `$(PYTHON_CONFIG) --cflags` \
 	  -Wno-register -Wno-macro-redefined \
 	  -I../../mercurial fncache.cc \
-	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
-	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  pyutil.o $(PARSERS_OBJS) \
+	  $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
 	  -o $$OUT/fncache_fuzzer
 
-jsonescapeu8fast_fuzzer: jsonescapeu8fast.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
-	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+jsonescapeu8fast_fuzzer: jsonescapeu8fast.cc pyutil.o $(PARSERS_OBJS)
+	$(CXX) $(CXXFLAGS) `$(PYTHON_CONFIG) --cflags` \
 	  -Wno-register -Wno-macro-redefined \
 	  -I../../mercurial jsonescapeu8fast.cc \
-	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
-	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  pyutil.o $(PARSERS_OBJS) \
+	  $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
 	  -o $$OUT/jsonescapeu8fast_fuzzer
 
-manifest_corpus.zip:
-	python manifest_corpus.py $$OUT/manifest_fuzzer_seed_corpus.zip
-
-manifest_fuzzer: manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
-	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+manifest_fuzzer: manifest.cc pyutil.o $(PARSERS_OBJS) $$OUT/manifest_fuzzer_seed_corpus.zip
+	$(CXX) $(CXXFLAGS) `$(PYTHON_CONFIG) --cflags` \
 	  -Wno-register -Wno-macro-redefined \
 	  -I../../mercurial manifest.cc \
-	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
-	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  pyutil.o $(PARSERS_OBJS) \
+	  $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
 	  -o $$OUT/manifest_fuzzer
 
-revlog_fuzzer: revlog.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
-	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+revlog_fuzzer: revlog.cc pyutil.o $(PARSERS_OBJS) $$OUT/revlog_fuzzer_seed_corpus.zip
+	$(CXX) $(CXXFLAGS) `$(PYTHON_CONFIG) --cflags` \
 	  -Wno-register -Wno-macro-redefined \
 	  -I../../mercurial revlog.cc \
-	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
-	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  pyutil.o $(PARSERS_OBJS) \
+	  $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
 	  -o $$OUT/revlog_fuzzer
 
-revlog_corpus.zip:
-	python revlog_corpus.py $$OUT/revlog_fuzzer_seed_corpus.zip
-
-dirstate_fuzzer: dirstate.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
-	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+dirstate_fuzzer: dirstate.cc pyutil.o $(PARSERS_OBJS) $$OUT/dirstate_fuzzer_seed_corpus.zip
+	$(CXX) $(CXXFLAGS) `$(PYTHON_CONFIG) --cflags` \
 	  -Wno-register -Wno-macro-redefined \
 	  -I../../mercurial dirstate.cc \
-	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
-	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  pyutil.o $(PARSERS_OBJS) \
+	  $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
 	  -o $$OUT/dirstate_fuzzer
 
-dirstate_corpus.zip:
-	python dirstate_corpus.py $$OUT/dirstate_fuzzer_seed_corpus.zip
-
-fm1readmarkers_fuzzer: fm1readmarkers.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
-	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+fm1readmarkers_fuzzer: fm1readmarkers.cc pyutil.o $(PARSERS_OBJS) $$OUT/fm1readmarkers_fuzzer_seed_corpus.zip
+	$(CXX) $(CXXFLAGS) `$(PYTHON_CONFIG) --cflags` \
 	  -Wno-register -Wno-macro-redefined \
 	  -I../../mercurial fm1readmarkers.cc \
-	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
-	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  pyutil.o $(PARSERS_OBJS) \
+	  $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
 	  -o $$OUT/fm1readmarkers_fuzzer
 
-fm1readmarkers_corpus.zip:
-	python fm1readmarkers_corpus.py $$OUT/fm1readmarkers_fuzzer_seed_corpus.zip
-
 clean:
 	$(RM) *.o *_fuzzer \
 	  bdiff \
 	  mpatch \
 	  xdiff
 
-oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer dirs_fuzzer fncache_fuzzer jsonescapeu8fast_fuzzer manifest_fuzzer manifest_corpus.zip revlog_fuzzer revlog_corpus.zip dirstate_fuzzer dirstate_corpus.zip fm1readmarkers_fuzzer fm1readmarkers_corpus.zip
+oss-fuzz: bdiff_fuzzer mpatch_fuzzer xdiff_fuzzer dirs_fuzzer fncache_fuzzer jsonescapeu8fast_fuzzer manifest_fuzzer revlog_fuzzer dirstate_fuzzer fm1readmarkers_fuzzer
 
 .PHONY: all clean oss-fuzz
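
The comment at the top of this Makefile describes standalone_fuzz_target_runner as a no-fuzzing replay harness. Runners of that kind are conventionally a small main() that feeds each file named on the command line through LLVMFuzzerTestOneInput; a minimal sketch under that assumption (the actual standalone_fuzz_target_runner.cc may differ):

// Minimal replay-runner sketch; an assumption, not the actual
// standalone_fuzz_target_runner.cc shipped with this changeset.
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <iterator>
#include <vector>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);

int main(int argc, char **argv)
{
	for (int i = 1; i < argc; ++i) {
		// Slurp each input file and hand its bytes to the target.
		std::ifstream in(argv[i], std::ios::binary);
		std::vector<char> bytes(
		    (std::istreambuf_iterator<char>(in)),
		    std::istreambuf_iterator<char>());
		LLVMFuzzerTestOneInput(
		    reinterpret_cast<const uint8_t *>(bytes.data()),
		    bytes.size());
		std::cout << "Executed " << argv[i] << '\n';
	}
	return 0;
}

Linking a fuzzer against this object instead of a real fuzzing engine (the default when LIB_FUZZING_ENGINE is unset) turns each *_fuzzer binary into a plain regression driver for its seed corpus.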
--- a/contrib/fuzz/bdiff.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/bdiff.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -9,22 +9,25 @@
 #include <memory>
 #include <stdlib.h>
 
-#include "fuzzutil.h"
+#include "FuzzedDataProvider.h"
 
 extern "C" {
 #include "bdiff.h"
 
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+	return 0;
+}
+
 int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
 {
-	auto maybe_inputs = SplitInputs(Data, Size);
-	if (!maybe_inputs) {
-		return 0;
-	}
-	auto inputs = std::move(maybe_inputs.value());
+	FuzzedDataProvider provider(Data, Size);
+	std::string left = provider.ConsumeRandomLengthString(Size);
+	std::string right = provider.ConsumeRemainingBytesAsString();
 
 	struct bdiff_line *a, *b;
-	int an = bdiff_splitlines(inputs.left.get(), inputs.left_size, &a);
-	int bn = bdiff_splitlines(inputs.right.get(), inputs.right_size, &b);
+	int an = bdiff_splitlines(left.c_str(), left.size(), &a);
+	int bn = bdiff_splitlines(right.c_str(), right.size(), &b);
 	struct bdiff_hunk l;
 	bdiff_diff(a, an, b, bn, &l);
 	free(a);
@@ -33,12 +36,4 @@
 	return 0; // Non-zero return values are reserved for future use.
 }
 
-#ifdef HG_FUZZER_INCLUDE_MAIN
-int main(int argc, char **argv)
-{
-	const char data[] = "asdf";
-	return LLVMFuzzerTestOneInput((const uint8_t *)data, 4);
-}
-#endif
-
 } // extern "C"
--- a/contrib/fuzz/dirs.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/dirs.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -9,16 +9,15 @@
 
 extern "C" {
 
-static PyCodeObject *code;
+static PYCODETYPE *code;
 
 extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
 {
 	contrib::initpy(*argv[0]);
-	code = (PyCodeObject *)Py_CompileString(R"py(
-from parsers import dirs
+	code = (PYCODETYPE *)Py_CompileString(R"py(
 try:
   files = mdata.split('\n')
-  d = dirs(files)
+  d = parsers.dirs(files)
   list(d)
   'a' in d
   if files:
@@ -29,7 +28,7 @@
   # to debug failures.
   # print e
 )py",
-	                                        "fuzzer", Py_file_input);
+	                                      "fuzzer", Py_file_input);
 	return 0;
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/dirs_corpus.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,29 @@
+from __future__ import absolute_import, print_function
+
+import argparse
+import zipfile
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+    zf.writestr(
+        "greek-tree",
+        "\n".join(
+            [
+                "iota",
+                "A/mu",
+                "A/B/lambda",
+                "A/B/E/alpha",
+                "A/B/E/beta",
+                "A/D/gamma",
+                "A/D/G/pi",
+                "A/D/G/rho",
+                "A/D/G/tau",
+                "A/D/H/chi",
+                "A/D/H/omega",
+                "A/D/H/psi",
+            ]
+        ),
+    )
--- a/contrib/fuzz/dirstate.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/dirstate.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -9,24 +9,23 @@
 
 extern "C" {
 
-static PyCodeObject *code;
+static PYCODETYPE *code;
 
 extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
 {
 	contrib::initpy(*argv[0]);
-	code = (PyCodeObject *)Py_CompileString(R"py(
-from parsers import parse_dirstate
+	code = (PYCODETYPE *)Py_CompileString(R"py(
 try:
     dmap = {}
     copymap = {}
-    p = parse_dirstate(dmap, copymap, data)
+    p = parsers.parse_dirstate(dmap, copymap, data)
 except Exception as e:
     pass
     # uncomment this print if you're editing this Python code
     # to debug failures.
     # print e
 )py",
-	                                        "fuzzer", Py_file_input);
+	                                      "fuzzer", Py_file_input);
 	return 0;
 }
 
--- a/contrib/fuzz/dirstate_corpus.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/dirstate_corpus.py	Tue Jan 21 13:14:51 2020 -0500
@@ -13,5 +13,5 @@
 
 with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
     if os.path.exists(dirstate):
-        with open(dirstate) as f:
+        with open(dirstate, 'rb') as f:
             zf.writestr("dirstate", f.read())
--- a/contrib/fuzz/fm1readmarkers.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/fm1readmarkers.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -9,13 +9,12 @@
 
 extern "C" {
 
-static PyCodeObject *code;
+static PYCODETYPE *code;
 
 extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
 {
 	contrib::initpy(*argv[0]);
-	code = (PyCodeObject *)Py_CompileString(R"py(
-from parsers import fm1readmarkers
+	code = (PYCODETYPE *)Py_CompileString(R"py(
 def maybeint(s, default):
     try:
         return int(s)
@@ -31,14 +30,14 @@
     else:
         offset = stop = 0
     offset, stop = maybeint(offset, 0), maybeint(stop, len(data))
-    fm1readmarkers(data, offset, stop)
+    parsers.fm1readmarkers(data, offset, stop)
 except Exception as e:
     pass
     # uncomment this print if you're editing this Python code
     # to debug failures.
     # print e
 )py",
-	                                        "fuzzer", Py_file_input);
+	                                      "fuzzer", Py_file_input);
 	return 0;
 }
 
--- a/contrib/fuzz/fncache.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/fncache.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -10,29 +10,20 @@
 
 extern "C" {
 
-static PyCodeObject *code;
+static PYCODETYPE *code;
 
 extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
 {
 	contrib::initpy(*argv[0]);
-	code = (PyCodeObject *)Py_CompileString(R"py(
-from parsers import (
-    isasciistr,
-    asciilower,
-    asciiupper,
-    encodedir,
-    pathencode,
-    lowerencode,
-)
-
+	code = (PYCODETYPE *)Py_CompileString(R"py(
 try:
     for fn in (
-        isasciistr,
-        asciilower,
-        asciiupper,
-        encodedir,
-        pathencode,
-        lowerencode,
+        parsers.isasciistr,
+        parsers.asciilower,
+        parsers.asciiupper,
+        parsers.encodedir,
+        parsers.pathencode,
+        parsers.lowerencode,
     ):
         try:
             fn(data)
@@ -53,7 +44,7 @@
     # to debug failures.
     # print(e)
 )py",
-	                                        "fuzzer", Py_file_input);
+	                                      "fuzzer", Py_file_input);
 	if (!code) {
 		std::cerr << "failed to compile Python code!" << std::endl;
 	}
--- a/contrib/fuzz/fuzzutil.cc	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-#include "fuzzutil.h"
-
-#include <cstring>
-#include <utility>
-
-contrib::optional<two_inputs> SplitInputs(const uint8_t *Data, size_t Size)
-{
-	if (!Size) {
-		return contrib::nullopt;
-	}
-	// figure out a random point in [0, Size] to split our input.
-	size_t left_size = (Data[0] / 255.0) * (Size - 1);
-
-	// Copy inputs to new allocations so if bdiff over-reads
-	// AddressSanitizer can detect it.
-	std::unique_ptr<char[]> left(new char[left_size]);
-	std::memcpy(left.get(), Data + 1, left_size);
-	// right starts at the next byte after left ends
-	size_t right_size = Size - (left_size + 1);
-	std::unique_ptr<char[]> right(new char[right_size]);
-	std::memcpy(right.get(), Data + 1 + left_size, right_size);
-	LOG(2) << "inputs are  " << left_size << " and " << right_size
-	       << " bytes" << std::endl;
-	two_inputs result = {std::move(right), right_size, std::move(left),
-	                     left_size};
-	return result;
-}
--- a/contrib/fuzz/fuzzutil.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/fuzzutil.h	Tue Jan 21 13:14:51 2020 -0500
@@ -34,14 +34,4 @@
 	if (level <= DEBUG)                                                    \
 	std::cout
 
-struct two_inputs {
-	std::unique_ptr<char[]> right;
-	size_t right_size;
-	std::unique_ptr<char[]> left;
-	size_t left_size;
-};
-
-/* Split a non-zero-length input into two inputs. */
-contrib::optional<two_inputs> SplitInputs(const uint8_t *Data, size_t Size);
-
 #endif /* CONTRIB_FUZZ_FUZZUTIL_H */
--- a/contrib/fuzz/jsonescapeu8fast.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/jsonescapeu8fast.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -5,29 +5,27 @@
 
 #include "pyutil.h"
 
-#include <fuzzer/FuzzedDataProvider.h>
 #include <iostream>
 #include <string>
+#include "FuzzedDataProvider.h"
 
 extern "C" {
 
-static PyCodeObject *code;
+static PYCODETYPE *code;
 
 extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
 {
 	contrib::initpy(*argv[0]);
-	code = (PyCodeObject *)Py_CompileString(R"py(
-from parsers import jsonescapeu8fast
-
+	code = (PYCODETYPE *)Py_CompileString(R"py(
 try:
-    jsonescapeu8fast(data, paranoid)
+    parsers.jsonescapeu8fast(data, paranoid)
 except Exception as e:
     pass
     # uncomment this print if you're editing this Python code
     # to debug failures.
     # print(e)
 )py",
-	                                        "fuzzer", Py_file_input);
+	                                      "fuzzer", Py_file_input);
 	if (!code) {
 		std::cerr << "failed to compile Python code!" << std::endl;
 	}
--- a/contrib/fuzz/manifest.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/manifest.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -9,15 +9,14 @@
 
 extern "C" {
 
-static PyCodeObject *code;
+static PYCODETYPE *code;
 
 extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
 {
 	contrib::initpy(*argv[0]);
-	code = (PyCodeObject *)Py_CompileString(R"py(
-from parsers import lazymanifest
+	code = (PYCODETYPE *)Py_CompileString(R"py(
 try:
-  lm = lazymanifest(mdata)
+  lm = parsers.lazymanifest(mdata)
   # iterate the whole thing, which causes the code to fully parse
   # every line in the manifest
   for e, _, _ in lm.iterentries():
@@ -41,7 +40,7 @@
   # to debug failures.
   # print e
 )py",
-	                                        "fuzzer", Py_file_input);
+	                                      "fuzzer", Py_file_input);
 	return 0;
 }
 
--- a/contrib/fuzz/mpatch.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/mpatch.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -14,6 +14,11 @@
 
 #include "fuzzutil.h"
 
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+	return 0;
+}
+
 // To avoid having too many OOMs from the fuzzer infrastructure, we'll
 // skip patch application if the resulting fulltext would be bigger
 // than 10MiB.
@@ -106,17 +111,4 @@
 	return 0;
 }
 
-#ifdef HG_FUZZER_INCLUDE_MAIN
-int main(int argc, char **argv)
-{
-	// One text, one patch.
-	const char data[] = "\x02\x00\0x1\x00\x0d"
-	                    // base text
-	                    "a"
-	                    // binary delta that will append a single b
-	                    "\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01b";
-	return LLVMFuzzerTestOneInput((const uint8_t *)data, 19);
-}
-#endif
-
 } // extern "C"
--- a/contrib/fuzz/mpatch_corpus.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/mpatch_corpus.py	Tue Jan 21 13:14:51 2020 -0500
@@ -2,6 +2,7 @@
 
 import argparse
 import struct
+import sys
 import zipfile
 
 from mercurial import (
@@ -14,34 +15,48 @@
 args = ap.parse_args()
 
 
-class deltafrag(object):
+if sys.version_info[0] < 3:
+
+    class py2reprhack(object):
+        def __repr__(self):
+            """Py2 calls __repr__ for `bytes(foo)`, forward to __bytes__"""
+            return self.__bytes__()
+
+
+else:
+
+    class py2reprhack(object):
+        """Not needed on py3."""
+
+
+class deltafrag(py2reprhack):
     def __init__(self, start, end, data):
         self.start = start
         self.end = end
         self.data = data
 
-    def __str__(self):
+    def __bytes__(self):
         return (
             struct.pack(">lll", self.start, self.end, len(self.data))
             + self.data
         )
 
 
-class delta(object):
+class delta(py2reprhack):
     def __init__(self, frags):
         self.frags = frags
 
-    def __str__(self):
-        return ''.join(str(f) for f in self.frags)
+    def __bytes__(self):
+        return b''.join(bytes(f) for f in self.frags)
 
 
-class corpus(object):
+class corpus(py2reprhack):
     def __init__(self, base, deltas):
         self.base = base
         self.deltas = deltas
 
-    def __str__(self):
-        deltas = [str(d) for d in self.deltas]
+    def __bytes__(self):
+        deltas = [bytes(d) for d in self.deltas]
         parts = (
             [
                 struct.pack(">B", len(deltas) + 1),
@@ -51,300 +66,301 @@
             + [self.base]
             + deltas
         )
-        return "".join(parts)
+        return b''.join(parts)
 
 
 with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
     # Manually constructed entries
     zf.writestr(
-        "one_delta_applies", str(corpus('a', [delta([deltafrag(0, 1, 'b')])]))
+        "one_delta_applies",
+        bytes(corpus(b'a', [delta([deltafrag(0, 1, b'b')])])),
     )
     zf.writestr(
         "one_delta_starts_late",
-        str(corpus('a', [delta([deltafrag(3, 1, 'b')])])),
+        bytes(corpus(b'a', [delta([deltafrag(3, 1, b'b')])])),
     )
     zf.writestr(
         "one_delta_ends_late",
-        str(corpus('a', [delta([deltafrag(0, 20, 'b')])])),
+        bytes(corpus(b'a', [delta([deltafrag(0, 20, b'b')])])),
     )
 
     try:
         # Generated from repo data
-        r = hg.repository(uimod.ui(), '../..')
-        fl = r.file('mercurial/manifest.py')
+        r = hg.repository(uimod.ui(), b'../..')
+        fl = r.file(b'mercurial/manifest.py')
         rl = getattr(fl, '_revlog', fl)
         bins = rl._chunks(rl._deltachain(10)[0])
-        zf.writestr('manifest_py_rev_10', str(corpus(bins[0], bins[1:])))
+        zf.writestr('manifest_py_rev_10', bytes(corpus(bins[0], bins[1:])))
     except:  # skip this, so no re-raises
         print('skipping seed file from repo data')
     # Automatically discovered by running the fuzzer
     zf.writestr(
-        "mpatch_decode_old_overread", "\x02\x00\x00\x00\x02\x00\x00\x00"
+        "mpatch_decode_old_overread", b"\x02\x00\x00\x00\x02\x00\x00\x00"
     )
     # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=8876
     zf.writestr(
         "mpatch_ossfuzz_getbe32_ubsan",
-        "\x02\x00\x00\x00\x0c    \xff\xff\xff\xff    ",
+        b"\x02\x00\x00\x00\x0c    \xff\xff\xff\xff    ",
     )
     zf.writestr(
         "mpatch_apply_over_memcpy",
-        '\x13\x01\x00\x05\xd0\x00\x00\x00\x00\x00\x00\x00\x00\n \x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8c\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00A\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x94\x18'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\xff\xfa\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x94\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xfa\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00]\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00se\x00\x00'
-        '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-        '\x00\x00\x00\x00',
+        b'\x13\x01\x00\x05\xd0\x00\x00\x00\x00\x00\x00\x00\x00\n \x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8c\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00A\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x94\x18'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\xff\xfa\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x94\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xfa\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00]\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00se\x00\x00'
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+        b'\x00\x00\x00\x00',
     )
--- a/contrib/fuzz/pyutil.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/pyutil.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -1,21 +1,31 @@
 #include "pyutil.h"
 
+#include <iostream>
 #include <string>
 
 namespace contrib
 {
 
+#if PY_MAJOR_VERSION >= 3
+#define HG_FUZZER_PY3 1
+PyMODINIT_FUNC PyInit_parsers(void);
+#else
+PyMODINIT_FUNC initparsers(void);
+#endif
+
 static char cpypath[8192] = "\0";
 
 static PyObject *mainmod;
 static PyObject *globals;
 
-/* TODO: use Python 3 for this fuzzing? */
-PyMODINIT_FUNC initparsers(void);
-
 void initpy(const char *cselfpath)
 {
+#ifdef HG_FUZZER_PY3
+	const std::string subdir = "/sanpy/lib/python3.7";
+#else
 	const std::string subdir = "/sanpy/lib/python2.7";
+#endif
+
 	/* HACK ALERT: we need a full Python installation built without
 	   pymalloc and with ASAN, so we dump one in
 	   $OUT/sanpy/lib/python2.7. This helps us wire that up. */
@@ -24,7 +34,11 @@
 	auto pos = selfpath.rfind("/");
 	if (pos == std::string::npos) {
 		char wd[8192];
-		getcwd(wd, 8192);
+		if (!getcwd(wd, 8192)) {
+			std::cerr << "Failed to call getcwd: errno " << errno
+			          << std::endl;
+			exit(1);
+		}
 		pypath = std::string(wd) + subdir;
 	} else {
 		pypath = selfpath.substr(0, pos) + subdir;
@@ -34,11 +48,24 @@
 	setenv("PYTHONNOUSERSITE", "1", 1);
 	/* prevent Python from looking up users in the fuzz environment */
 	setenv("PYTHONUSERBASE", cpypath, 1);
+#ifdef HG_FUZZER_PY3
+	std::wstring wcpypath(pypath.begin(), pypath.end());
+	Py_SetPythonHome(wcpypath.c_str());
+#else
 	Py_SetPythonHome(cpypath);
+#endif
 	Py_InitializeEx(0);
 	mainmod = PyImport_AddModule("__main__");
 	globals = PyModule_GetDict(mainmod);
+
+#ifdef HG_FUZZER_PY3
+	PyObject *mod = PyInit_parsers();
+#else
 	initparsers();
+	PyObject *mod = PyImport_ImportModule("parsers");
+#endif
+
+	PyDict_SetItemString(globals, "parsers", mod);
 }
 
 PyObject *pyglobals()
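
The init rework above does two things for Python 3: it selects the right module-init entry point (PyInit_parsers vs. initparsers) and publishes the resulting module into __main__'s globals, so compiled fuzzer snippets (such as the revlog one below) can call parsers.parse_index2 without an import. A rough Python-level model of that wiring, with a hypothetical stand-in for the C extension:

    import types

    # Hypothetical stand-in for the compiled 'parsers' C extension.
    parsers = types.SimpleNamespace(
        parse_index2=lambda data, inline: (None, None)
    )

    # Equivalent of PyDict_SetItemString(globals, "parsers", mod):
    fuzz_globals = {'parsers': parsers}

    # The fuzzer compiles its snippet once, then runs it per input with
    # 'data' injected into the same globals dict.
    code = compile("index, cache = parsers.parse_index2(data, True)",
                   "fuzzer", "exec")
    fuzz_globals['data'] = b'\x00' * 64
    exec(code, fuzz_globals)
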
--- a/contrib/fuzz/pyutil.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/pyutil.h	Tue Jan 21 13:14:51 2020 -0500
@@ -1,5 +1,11 @@
 #include <Python.h>
 
+#if PY_MAJOR_VERSION >= 3
+#define PYCODETYPE PyObject
+#else
+#define PYCODETYPE PyCodeObject
+#endif
+
 namespace contrib
 {
 
--- a/contrib/fuzz/revlog.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/revlog.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -9,16 +9,15 @@
 
 extern "C" {
 
-static PyCodeObject *code;
+static PYCODETYPE *code;
 
 extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
 {
 	contrib::initpy(*argv[0]);
-	code = (PyCodeObject *)Py_CompileString(R"py(
-from parsers import parse_index2
+	code = (PYCODETYPE *)Py_CompileString(R"py(
 for inline in (True, False):
     try:
-        index, cache = parse_index2(data, inline)
+        index, cache = parsers.parse_index2(data, inline)
         index.slicechunktodensity(list(range(len(index))), 0.5, 262144)
         index.stats()
         index.findsnapshots({}, 0)
@@ -35,7 +34,7 @@
         # to debug failures.
         # print e
 )py",
-	                                        "fuzzer", Py_file_input);
+	                                      "fuzzer", Py_file_input);
 	return 0;
 }
 
--- a/contrib/fuzz/revlog_corpus.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/revlog_corpus.py	Tue Jan 21 13:14:51 2020 -0500
@@ -1,4 +1,4 @@
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import
 
 import argparse
 import os
@@ -16,13 +16,10 @@
     reporoot, '.hg', 'store', 'data', 'contrib', 'fuzz', 'mpatch.cc.i'
 )
 
-print(changelog, os.path.exists(changelog))
-print(contributing, os.path.exists(contributing))
-
 with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
     if os.path.exists(changelog):
-        with open(changelog) as f:
+        with open(changelog, 'rb') as f:
             zf.writestr("00changelog.i", f.read())
     if os.path.exists(contributing):
-        with open(contributing) as f:
+        with open(contributing, 'rb') as f:
             zf.writestr("contributing.i", f.read())
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/standalone_fuzz_target_runner.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,45 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+// Licensed under the Apache License, Version 2.0 (the "License");
+
+// Example of a standalone runner for "fuzz targets".
+// It reads all files passed as parameters and feeds their contents
+// one by one into the fuzz target (LLVMFuzzerTestOneInput).
+// This runner does not do any fuzzing, but allows us to run the fuzz target
+// on the test corpus (e.g. "do_stuff_test_data") or on a single file,
+// e.g. the one that comes from a bug report.
+
+#include <cassert>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+// Forward declare the "fuzz target" interface.
+// We deliberately keep this interface simple and header-free.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv);
+
+int main(int argc, char **argv)
+{
+	LLVMFuzzerInitialize(&argc, &argv);
+
+	for (int i = 1; i < argc; i++) {
+		std::ifstream in(argv[i]);
+		in.seekg(0, in.end);
+		size_t length = in.tellg();
+		in.seekg(0, in.beg);
+		std::cout << "Reading " << length << " bytes from " << argv[i]
+		          << std::endl;
+		// Allocate exactly length bytes so that we reliably catch
+		// buffer overflows.
+		std::vector<char> bytes(length);
+		in.read(bytes.data(), bytes.size());
+		assert(in);
+		LLVMFuzzerTestOneInput(
+		    reinterpret_cast<const uint8_t *>(bytes.data()),
+		    bytes.size());
+		std::cout << "Execution successful" << std::endl;
+	}
+	return 0;
+}
+// no-check-code since this is from a third party
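
The runner above replays a corpus (or a single crash file) through the fuzz target without doing any mutation. For illustration, a rough Python analogue of the same loop, with a placeholder target:

    import sys

    def replay(paths, test_one_input):
        # Feed each file's bytes to the fuzz entry point exactly once.
        for path in paths:
            with open(path, 'rb') as f:
                data = f.read()
            print('Reading %d bytes from %s' % (len(data), path))
            test_one_input(data)
            print('Execution successful')

    if __name__ == '__main__':
        replay(sys.argv[1:], lambda data: None)  # placeholder target
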
--- a/contrib/fuzz/xdiff.cc	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/fuzz/xdiff.cc	Tue Jan 21 13:14:51 2020 -0500
@@ -10,10 +10,15 @@
 #include <inttypes.h>
 #include <stdlib.h>
 
-#include "fuzzutil.h"
+#include "FuzzedDataProvider.h"
 
 extern "C" {
 
+int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+	return 0;
+}
+
 int hunk_consumer(long a1, long a2, long b1, long b2, void *priv)
 {
 	// TODO: probably also test returning -1 from this when things break?
@@ -27,17 +32,15 @@
 	if (Size > 100000) {
 		return 0;
 	}
-	auto maybe_inputs = SplitInputs(Data, Size);
-	if (!maybe_inputs) {
-		return 0;
-	}
-	auto inputs = std::move(maybe_inputs.value());
+	FuzzedDataProvider provider(Data, Size);
+	std::string left = provider.ConsumeRandomLengthString(Size);
+	std::string right = provider.ConsumeRemainingBytesAsString();
 	mmfile_t a, b;
 
-	a.ptr = inputs.left.get();
-	a.size = inputs.left_size;
-	b.ptr = inputs.right.get();
-	b.size = inputs.right_size;
+	a.ptr = (char *)left.c_str();
+	a.size = left.size();
+	b.ptr = (char *)right.c_str();
+	b.size = right.size();
 	xpparam_t xpp = {
 	    XDF_INDENT_HEURISTIC, /* flags */
 	};
@@ -52,12 +55,4 @@
 	return 0; // Non-zero return values are reserved for future use.
 }
 
-#ifdef HG_FUZZER_INCLUDE_MAIN
-int main(int argc, char **argv)
-{
-	const char data[] = "asdf";
-	return LLVMFuzzerTestOneInput((const uint8_t *)data, 4);
-}
-#endif
-
 } // extern "C"
--- a/contrib/hg-ssh	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/hg-ssh	Tue Jan 21 13:14:51 2020 -0500
@@ -35,7 +35,9 @@
 import sys
 
 # enable importing on demand to reduce startup time
-import hgdemandimport ; hgdemandimport.enable()
+import hgdemandimport
+
+hgdemandimport.enable()
 
 from mercurial import (
     dispatch,
@@ -43,6 +45,7 @@
     ui as uimod,
 )
 
+
 def main():
     # Prevent insertion/deletion of CRs
     dispatch.initstdio()
@@ -56,9 +59,10 @@
             args.pop(0)
         else:
             break
-    allowed_paths = [os.path.normpath(os.path.join(cwd,
-                                                   os.path.expanduser(path)))
-                     for path in args]
+    allowed_paths = [
+        os.path.normpath(os.path.join(cwd, os.path.expanduser(path)))
+        for path in args
+    ]
     orig_cmd = os.getenv('SSH_ORIGINAL_COMMAND', '?')
     try:
         cmdargv = shlex.split(orig_cmd)
@@ -75,10 +79,18 @@
             if readonly:
                 if not req.ui:
                     req.ui = uimod.ui.load()
-                req.ui.setconfig(b'hooks', b'pretxnopen.hg-ssh',
-                                 b'python:__main__.rejectpush', b'hg-ssh')
-                req.ui.setconfig(b'hooks', b'prepushkey.hg-ssh',
-                                 b'python:__main__.rejectpush', b'hg-ssh')
+                req.ui.setconfig(
+                    b'hooks',
+                    b'pretxnopen.hg-ssh',
+                    b'python:__main__.rejectpush',
+                    b'hg-ssh',
+                )
+                req.ui.setconfig(
+                    b'hooks',
+                    b'prepushkey.hg-ssh',
+                    b'python:__main__.rejectpush',
+                    b'hg-ssh',
+                )
             dispatch.dispatch(req)
         else:
             sys.stderr.write('Illegal repository "%s"\n' % repo)
@@ -87,11 +99,13 @@
         sys.stderr.write('Illegal command "%s"\n' % orig_cmd)
         sys.exit(255)
 
+
 def rejectpush(ui, **kwargs):
     ui.warn((b"Permission denied\n"))
     # mercurial hooks use unix process conventions for hook return values
     # so a truthy return means failure
     return True
 
+
 if __name__ == '__main__':
     main()
--- a/contrib/hgclient.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/hgclient.py	Tue Jan 21 13:14:51 2020 -0500
@@ -39,7 +39,7 @@
     cmdline.extend(extraargs)
 
     def tonative(cmdline):
-        if os.name != r'nt':
+        if os.name != 'nt':
             return cmdline
         return [arg.decode("utf-8") for arg in cmdline]
 
--- a/contrib/hgperf	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/hgperf	Tue Jan 21 13:14:51 2020 -0500
@@ -37,18 +37,24 @@
 
 if libdir != '@' 'LIBDIR' '@':
     if not os.path.isabs(libdir):
-        libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
-                              libdir)
+        libdir = os.path.join(
+            os.path.dirname(os.path.realpath(__file__)), libdir
+        )
         libdir = os.path.abspath(libdir)
     sys.path.insert(0, libdir)
 
 # enable importing on demand to reduce startup time
 try:
-    from mercurial import demandimport; demandimport.enable()
+    from mercurial import demandimport
+
+    demandimport.enable()
 except ImportError:
     import sys
-    sys.stderr.write("abort: couldn't find mercurial libraries in [%s]\n" %
-                     ' '.join(sys.path))
+
+    sys.stderr.write(
+        "abort: couldn't find mercurial libraries in [%s]\n"
+        % ' '.join(sys.path)
+    )
     sys.stderr.write("(check your install and PYTHONPATH)\n")
     sys.exit(-1)
 
@@ -57,6 +63,7 @@
     util,
 )
 
+
 def timer(func, title=None):
     results = []
     begin = util.timer()
@@ -69,7 +76,7 @@
         ostop = os.times()
         count += 1
         a, b = ostart, ostop
-        results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
+        results.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
         if cstop - begin > 3 and count >= 100:
             break
         if cstop - begin > 10 and count >= 3:
@@ -79,19 +86,27 @@
     if r:
         sys.stderr.write("! result: %s\n" % r)
     m = min(results)
-    sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
-                     % (m[0], m[1] + m[2], m[1], m[2], count))
+    sys.stderr.write(
+        "! wall %f comb %f user %f sys %f (best of %d)\n"
+        % (m[0], m[1] + m[2], m[1], m[2], count)
+    )
+
 
 orgruncommand = dispatch.runcommand
 
+
 def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
     ui.pushbuffer()
     lui.pushbuffer()
-    timer(lambda : orgruncommand(lui, repo, cmd, fullargs, ui,
-                                 options, d, cmdpats, cmdoptions))
+    timer(
+        lambda: orgruncommand(
+            lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions
+        )
+    )
     ui.popbuffer()
     lui.popbuffer()
 
+
 dispatch.runcommand = runcommand
 
 dispatch.run()
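
For reference, the timer() helper records wall time plus user/sys deltas from os.times() per run, stops after either 3 seconds and 100 runs or 10 seconds and 3 runs, and reports the best sample. A self-contained sketch of the same pattern using only the stdlib:

    import os
    import time

    def bench(func):
        # Collect (wall, user, sys) per run; report the fastest run.
        results, begin, count = [], time.perf_counter(), 0
        while True:
            a, cstart = os.times(), time.perf_counter()
            func()
            cstop, b = time.perf_counter(), os.times()
            count += 1
            results.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
            if cstop - begin > 3 and count >= 100:
                break
            if cstop - begin > 10 and count >= 3:
                break
        return min(results), count
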
--- a/contrib/hgweb.fcgi	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/hgweb.fcgi	Tue Jan 21 13:14:51 2020 -0500
@@ -7,13 +7,16 @@
 
 # Uncomment and adjust if Mercurial is not installed system-wide
 # (consult "installed modules" path from 'hg debuginstall'):
-#import sys; sys.path.insert(0, "/path/to/python/lib")
+# import sys; sys.path.insert(0, "/path/to/python/lib")
 
 # Uncomment to send python tracebacks to the browser if an error occurs:
-#import cgitb; cgitb.enable()
+# import cgitb; cgitb.enable()
 
-from mercurial import demandimport; demandimport.enable()
+from mercurial import demandimport
+
+demandimport.enable()
 from mercurial.hgweb import hgweb
 from flup.server.fcgi import WSGIServer
+
 application = hgweb(config)
 WSGIServer(application).run()
--- a/contrib/import-checker.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/import-checker.py	Tue Jan 21 13:14:51 2020 -0500
@@ -535,6 +535,8 @@
             if fullname != '__future__':
                 if not fullname or (
                     fullname in stdlib_modules
+                    # allow standard 'from typing import ...' style
+                    and fullname.startswith('.')
                     and fullname not in localmods
                     and fullname + '.__init__' not in localmods
                 ):
--- a/contrib/packaging/docker/ubuntu.template	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/docker/ubuntu.template	Tue Jan 21 13:14:51 2020 -0500
@@ -10,7 +10,8 @@
   dh-python \
   less \
   python \
-  python-all-dev \
-  python-docutils \
+  python3-all \
+  python3-all-dev \
+  python3-docutils \
   unzip \
   zip
--- a/contrib/packaging/hg-docker	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/hg-docker	Tue Jan 21 13:14:51 2020 -0500
@@ -11,6 +11,7 @@
 import subprocess
 import sys
 
+
 def get_docker() -> str:
     docker = shutil.which('docker.io') or shutil.which('docker')
     if not docker:
@@ -21,15 +22,16 @@
         out = subprocess.check_output([docker, '-h'], stderr=subprocess.STDOUT)
 
         if b'Jansens' in out:
-            print('%s is the Docking System Tray; try installing docker.io' %
-                  docker)
+            print(
+                '%s is the Docking System Tray; try installing docker.io'
+                % docker
+            )
             sys.exit(1)
     except subprocess.CalledProcessError as e:
         print('error calling `%s -h`: %s' % (docker, e.output))
         sys.exit(1)
 
-    out = subprocess.check_output([docker, 'version'],
-                                  stderr=subprocess.STDOUT)
+    out = subprocess.check_output([docker, 'version'], stderr=subprocess.STDOUT)
 
     lines = out.splitlines()
     if not any(l.startswith((b'Client:', b'Client version:')) for l in lines):
@@ -42,6 +44,7 @@
 
     return docker
 
+
 def get_dockerfile(path: pathlib.Path, args: list) -> bytes:
     with path.open('rb') as fh:
         df = fh.read()
@@ -51,6 +54,7 @@
 
     return df
 
+
 def build_docker_image(dockerfile: pathlib.Path, params: list, tag: str):
     """Build a Docker image from a templatized Dockerfile."""
     docker = get_docker()
@@ -65,9 +69,12 @@
     args = [
         docker,
         'build',
-        '--build-arg', 'http_proxy',
-        '--build-arg', 'https_proxy',
-        '--tag', tag,
+        '--build-arg',
+        'http_proxy',
+        '--build-arg',
+        'https_proxy',
+        '--tag',
+        tag,
         '-',
     ]
 
@@ -76,8 +83,10 @@
     p.communicate(input=dockerfile)
     if p.returncode:
         raise subprocess.CalledProcessError(
-                p.returncode, 'failed to build docker image: %s %s'
-                % (p.stdout, p.stderr))
+            p.returncode,
+            'failed to build docker image: %s %s' % (p.stdout, p.stderr),
+        )
+
 
 def command_build(args):
     build_args = []
@@ -85,13 +94,13 @@
         k, v = arg.split('=', 1)
         build_args.append((k.encode('utf-8'), v.encode('utf-8')))
 
-    build_docker_image(pathlib.Path(args.dockerfile),
-                       build_args,
-                       args.tag)
+    build_docker_image(pathlib.Path(args.dockerfile), build_args, args.tag)
+
 
 def command_docker(args):
     print(get_docker())
 
+
 def main() -> int:
     parser = argparse.ArgumentParser()
 
@@ -99,9 +108,12 @@
 
     build = subparsers.add_parser('build', help='Build a Docker image')
     build.set_defaults(func=command_build)
-    build.add_argument('--build-arg', action='append', default=[],
-                        help='Substitution to perform in Dockerfile; '
-                             'format: key=value')
+    build.add_argument(
+        '--build-arg',
+        action='append',
+        default=[],
+        help='Substitution to perform in Dockerfile; ' 'format: key=value',
+    )
     build.add_argument('dockerfile', help='path to Dockerfile to use')
     build.add_argument('tag', help='Tag to apply to created image')
 
@@ -112,5 +124,6 @@
 
     return args.func(args)
 
+
 if __name__ == '__main__':
     sys.exit(main())
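
get_dockerfile() renders the Dockerfile template by substituting each --build-arg key=value pair into its contents; the exact placeholder syntax lives in code not shown in this hunk. A hedged sketch of that step, assuming a %key%-style marker:

    import pathlib

    def render_dockerfile(path: pathlib.Path, build_args):
        df = path.read_bytes()
        # build_args is a list of (bytes, bytes) pairs, as command_build
        # constructs them; the %key% marker here is an assumption.
        for k, v in build_args:
            df = df.replace(b'%' + k + b'%', v)
        return df
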
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/packaging/hgpackaging/cli.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,153 @@
+# cli.py - Command line interface for automation
+#
+# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# no-check-code because Python 3 native.
+
+import argparse
+import os
+import pathlib
+
+from . import (
+    inno,
+    wix,
+)
+
+HERE = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
+SOURCE_DIR = HERE.parent.parent.parent
+
+
+def build_inno(python=None, iscc=None, version=None):
+    if not os.path.isabs(python):
+        raise Exception("--python arg must be an absolute path")
+
+    if iscc:
+        iscc = pathlib.Path(iscc)
+    else:
+        iscc = (
+            pathlib.Path(os.environ["ProgramFiles(x86)"])
+            / "Inno Setup 5"
+            / "ISCC.exe"
+        )
+
+    build_dir = SOURCE_DIR / "build"
+
+    inno.build(
+        SOURCE_DIR, build_dir, pathlib.Path(python), iscc, version=version,
+    )
+
+
+def build_wix(
+    name=None,
+    python=None,
+    version=None,
+    sign_sn=None,
+    sign_cert=None,
+    sign_password=None,
+    sign_timestamp_url=None,
+    extra_packages_script=None,
+    extra_wxs=None,
+    extra_features=None,
+):
+    fn = wix.build_installer
+    kwargs = {
+        "source_dir": SOURCE_DIR,
+        "python_exe": pathlib.Path(python),
+        "version": version,
+    }
+
+    if not os.path.isabs(python):
+        raise Exception("--python arg must be an absolute path")
+
+    if extra_packages_script:
+        kwargs["extra_packages_script"] = extra_packages_script
+    if extra_wxs:
+        kwargs["extra_wxs"] = dict(
+            thing.split("=") for thing in extra_wxs.split(",")
+        )
+    if extra_features:
+        kwargs["extra_features"] = extra_features.split(",")
+
+    if sign_sn or sign_cert:
+        fn = wix.build_signed_installer
+        kwargs["name"] = name
+        kwargs["subject_name"] = sign_sn
+        kwargs["cert_path"] = sign_cert
+        kwargs["cert_password"] = sign_password
+        kwargs["timestamp_url"] = sign_timestamp_url
+
+    fn(**kwargs)
+
+
+def get_parser():
+    parser = argparse.ArgumentParser()
+
+    subparsers = parser.add_subparsers()
+
+    sp = subparsers.add_parser("inno", help="Build Inno Setup installer")
+    sp.add_argument("--python", required=True, help="path to python.exe to use")
+    sp.add_argument("--iscc", help="path to iscc.exe to use")
+    sp.add_argument(
+        "--version",
+        help="Mercurial version string to use "
+        "(detected from __version__.py if not defined",
+    )
+    sp.set_defaults(func=build_inno)
+
+    sp = subparsers.add_parser(
+        "wix", help="Build Windows installer with WiX Toolset"
+    )
+    sp.add_argument("--name", help="Application name", default="Mercurial")
+    sp.add_argument(
+        "--python", help="Path to Python executable to use", required=True
+    )
+    sp.add_argument(
+        "--sign-sn",
+        help="Subject name (or fragment thereof) of certificate "
+        "to use for signing",
+    )
+    sp.add_argument(
+        "--sign-cert", help="Path to certificate to use for signing"
+    )
+    sp.add_argument("--sign-password", help="Password for signing certificate")
+    sp.add_argument(
+        "--sign-timestamp-url",
+        help="URL of timestamp server to use for signing",
+    )
+    sp.add_argument("--version", help="Version string to use")
+    sp.add_argument(
+        "--extra-packages-script",
+        help=(
+            "Script to execute to include extra packages in " "py2exe binary."
+        ),
+    )
+    sp.add_argument(
+        "--extra-wxs", help="CSV of path_to_wxs_file=working_dir_for_wxs_file"
+    )
+    sp.add_argument(
+        "--extra-features",
+        help=(
+            "CSV of extra feature names to include "
+            "in the installer from the extra wxs files"
+        ),
+    )
+    sp.set_defaults(func=build_wix)
+
+    return parser
+
+
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+
+    if not hasattr(args, "func"):
+        parser.print_help()
+        return
+
+    kwargs = dict(vars(args))
+    del kwargs["func"]
+
+    args.func(**kwargs)
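
main() uses the common argparse dispatch idiom: each subparser binds its handler via set_defaults(func=...), and the parsed namespace, minus func itself, is splatted into that handler as keyword arguments. A minimal standalone demonstration of the pattern:

    import argparse

    def build_demo(python=None, version=None):
        print('would build with', python, version)

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    sp = subparsers.add_parser('demo')
    sp.add_argument('--python', required=True)
    sp.add_argument('--version')
    sp.set_defaults(func=build_demo)

    args = parser.parse_args(['demo', '--python', '/usr/bin/python3'])
    kwargs = dict(vars(args))
    del kwargs['func']
    args.func(**kwargs)  # -> build_demo(python=..., version=None)
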
--- a/contrib/packaging/hgpackaging/inno.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/hgpackaging/inno.py	Tue Jan 21 13:14:51 2020 -0500
@@ -12,9 +12,16 @@
 import shutil
 import subprocess
 
-from .py2exe import build_py2exe
-from .util import find_vc_runtime_files
+import jinja2
 
+from .py2exe import (
+    build_py2exe,
+    stage_install,
+)
+from .util import (
+    find_vc_runtime_files,
+    read_version_py,
+)
 
 EXTRA_PACKAGES = {
     'dulwich',
@@ -23,6 +30,10 @@
     'win32ctypes',
 }
 
+PACKAGE_FILES_METADATA = {
+    'ReadMe.html': 'Flags: isreadme',
+}
+
 
 def build(
     source_dir: pathlib.Path,
@@ -43,11 +54,17 @@
         raise Exception('%s does not exist' % iscc_exe)
 
     vc_x64 = r'\x64' in os.environ.get('LIB', '')
+    arch = 'x64' if vc_x64 else 'x86'
+    inno_source_dir = source_dir / 'contrib' / 'packaging' / 'inno'
+    inno_build_dir = build_dir / ('inno-%s' % arch)
+    staging_dir = inno_build_dir / 'stage'
 
     requirements_txt = (
         source_dir / 'contrib' / 'packaging' / 'inno' / 'requirements.txt'
     )
 
+    inno_build_dir.mkdir(parents=True, exist_ok=True)
+
     build_py2exe(
         source_dir,
         build_dir,
@@ -57,6 +74,15 @@
         extra_packages=EXTRA_PACKAGES,
     )
 
+    # Purge the staging directory for every build so packaging is
+    # pristine.
+    if staging_dir.exists():
+        print('purging %s' % staging_dir)
+        shutil.rmtree(staging_dir)
+
+    # Now assemble all the packaged files into the staging directory.
+    stage_install(source_dir, staging_dir)
+
     # hg.exe depends on VC9 runtime DLLs. Copy those into place.
     for f in find_vc_runtime_files(vc_x64):
         if f.name.endswith('.manifest'):
@@ -64,22 +90,74 @@
         else:
             basename = f.name
 
-        dest_path = source_dir / 'dist' / basename
+        dest_path = staging_dir / basename
 
         print('copying %s to %s' % (f, dest_path))
         shutil.copyfile(f, dest_path)
 
+    # The final package layout is simply a mirror of the staging directory.
+    package_files = []
+    for root, dirs, files in os.walk(staging_dir):
+        dirs.sort()
+
+        root = pathlib.Path(root)
+
+        for f in sorted(files):
+            full = root / f
+            rel = full.relative_to(staging_dir)
+            if str(rel.parent) == '.':
+                dest_dir = '{app}'
+            else:
+                dest_dir = '{app}\\%s' % rel.parent
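+            # e.g. a staged file lib\library.zip is emitted into the
+            # template as Source: lib\library.zip; DestDir: {app}\lib.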
+
+            package_files.append(
+                {
+                    'source': rel,
+                    'dest_dir': dest_dir,
+                    'metadata': PACKAGE_FILES_METADATA.get(str(rel), None),
+                }
+            )
+
     print('creating installer')
 
+    # Install Inno files by rendering a template.
+    jinja_env = jinja2.Environment(
+        loader=jinja2.FileSystemLoader(str(inno_source_dir)),
+        # Need to change these to prevent conflict with Inno Setup.
+        comment_start_string='{##',
+        comment_end_string='##}',
+    )
+
+    try:
+        template = jinja_env.get_template('mercurial.iss')
+    except jinja2.TemplateSyntaxError as e:
+        raise Exception(
+            'template syntax error at %s:%d: %s'
+            % (e.name, e.lineno, e.message,)
+        )
+
+    content = template.render(package_files=package_files)
+
+    with (inno_build_dir / 'mercurial.iss').open('w', encoding='utf-8') as fh:
+        fh.write(content)
+
+    # Copy additional files used by Inno.
+    for p in ('mercurial.ico', 'postinstall.txt'):
+        shutil.copyfile(
+            source_dir / 'contrib' / 'win32' / p, inno_build_dir / p
+        )
+
     args = [str(iscc_exe)]
 
     if vc_x64:
         args.append('/dARCH=x64')
 
-    if version:
-        args.append('/dVERSION=%s' % version)
+    if not version:
+        version = read_version_py(source_dir)
+
+    args.append('/dVERSION=%s' % version)
 
     args.append('/Odist')
-    args.append('contrib/packaging/inno/mercurial.iss')
+    args.append(str(inno_build_dir / 'mercurial.iss'))
 
     subprocess.run(args, cwd=str(source_dir), check=True)
--- a/contrib/packaging/hgpackaging/py2exe.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/hgpackaging/py2exe.py	Tue Jan 21 13:14:51 2020 -0500
@@ -15,10 +15,48 @@
 from .util import (
     extract_tar_to_directory,
     extract_zip_to_directory,
+    process_install_rules,
     python_exe_info,
 )
 
 
+STAGING_RULES = [
+    ('contrib/bash_completion', 'Contrib/'),
+    ('contrib/hgk', 'Contrib/hgk.tcl'),
+    ('contrib/hgweb.fcgi', 'Contrib/'),
+    ('contrib/hgweb.wsgi', 'Contrib/'),
+    ('contrib/logo-droplets.svg', 'Contrib/'),
+    ('contrib/mercurial.el', 'Contrib/'),
+    ('contrib/mq.el', 'Contrib/'),
+    ('contrib/tcsh_completion', 'Contrib/'),
+    ('contrib/tcsh_completion_build.sh', 'Contrib/'),
+    ('contrib/vim/*', 'Contrib/Vim/'),
+    ('contrib/win32/postinstall.txt', 'ReleaseNotes.txt'),
+    ('contrib/win32/ReadMe.html', 'ReadMe.html'),
+    ('contrib/xml.rnc', 'Contrib/'),
+    ('contrib/zsh_completion', 'Contrib/'),
+    ('dist/hg.exe', './'),
+    ('dist/lib/*.dll', 'lib/'),
+    ('dist/lib/*.pyd', 'lib/'),
+    ('dist/lib/library.zip', 'lib/'),
+    ('dist/Microsoft.VC*.CRT.manifest', './'),
+    ('dist/msvc*.dll', './'),
+    ('dist/python*.dll', './'),
+    ('doc/*.html', 'doc/'),
+    ('doc/style.css', 'doc/'),
+    ('mercurial/helptext/**/*.txt', 'helptext/'),
+    ('mercurial/defaultrc/*.rc', 'hgrc.d/'),
+    ('mercurial/locale/**/*', 'locale/'),
+    ('mercurial/templates/**/*', 'Templates/'),
+    ('COPYING', 'Copying.txt'),
+]
+
+# List of paths to exclude from the staging area.
+STAGING_EXCLUDES = [
+    'doc/hg-ssh.8.html',
+]
+
+
 def build_py2exe(
     source_dir: pathlib.Path,
     build_dir: pathlib.Path,
@@ -169,3 +207,39 @@
         env=env,
         check=True,
     )
+
+
+def stage_install(
+    source_dir: pathlib.Path, staging_dir: pathlib.Path, lower_case=False
+):
+    """Copy all files to be installed to a directory.
+
+    This allows packaging to simply walk a directory tree to find source
+    files.
+    """
+    if lower_case:
+        rules = []
+        for source, dest in STAGING_RULES:
+            # Only lower directory names.
+            if '/' in dest:
+                parent, leaf = dest.rsplit('/', 1)
+                dest = '%s/%s' % (parent.lower(), leaf)
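+                # e.g. 'Contrib/hgk.tcl' stages as 'contrib/hgk.tcl';
+                # top-level names like 'ReadMe.html' keep their case.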
+            rules.append((source, dest))
+    else:
+        rules = STAGING_RULES
+
+    process_install_rules(rules, source_dir, staging_dir)
+
+    # Write out a default editor.rc file to configure notepad as the
+    # default editor.
+    with (staging_dir / 'hgrc.d' / 'editor.rc').open(
+        'w', encoding='utf-8'
+    ) as fh:
+        fh.write('[ui]\neditor = notepad\n')
+
+    # Purge any files we don't want to be there.
+    for f in STAGING_EXCLUDES:
+        p = staging_dir / f
+        if p.exists():
+            print('removing %s' % p)
+            p.unlink()
--- a/contrib/packaging/hgpackaging/util.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/hgpackaging/util.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,8 +9,11 @@
 
 import distutils.version
 import getpass
+import glob
 import os
 import pathlib
+import re
+import shutil
 import subprocess
 import tarfile
 import zipfile
@@ -164,3 +167,60 @@
         'version': version,
         'py3': version >= distutils.version.LooseVersion('3'),
     }
+
+
+def process_install_rules(
+    rules: list, source_dir: pathlib.Path, dest_dir: pathlib.Path
+):
+    for source, dest in rules:
+        if '*' in source:
+            if not dest.endswith('/'):
+                raise ValueError('destination must end in / when globbing')
+
+            # We strip off the source path component before the first glob
+            # character to construct the relative install path.
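+            # e.g. the rule ('mercurial/locale/**/*', 'locale/') stages
+            # mercurial/locale/de/LC_MESSAGES/hg.mo (an illustrative
+            # locale) at locale/de/LC_MESSAGES/hg.mo under dest_dir.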
+            prefix_end_index = source[: source.index('*')].rindex('/')
+            relative_prefix = source_dir / source[0:prefix_end_index]
+
+            for res in glob.glob(str(source_dir / source), recursive=True):
+                source_path = pathlib.Path(res)
+
+                if source_path.is_dir():
+                    continue
+
+                rel_path = source_path.relative_to(relative_prefix)
+
+                dest_path = dest_dir / dest[:-1] / rel_path
+
+                dest_path.parent.mkdir(parents=True, exist_ok=True)
+                print('copying %s to %s' % (source_path, dest_path))
+                shutil.copy(source_path, dest_path)
+
+        # Simple file case.
+        else:
+            source_path = pathlib.Path(source)
+
+            if dest.endswith('/'):
+                dest_path = pathlib.Path(dest) / source_path.name
+            else:
+                dest_path = pathlib.Path(dest)
+
+            full_source_path = source_dir / source_path
+            full_dest_path = dest_dir / dest_path
+
+            full_dest_path.parent.mkdir(parents=True, exist_ok=True)
+            shutil.copy(full_source_path, full_dest_path)
+            print('copying %s to %s' % (full_source_path, full_dest_path))
+
+
+def read_version_py(source_dir):
+    """Read the mercurial/__version__.py file to resolve the version string."""
+    p = source_dir / 'mercurial' / '__version__.py'
+
+    with p.open('r', encoding='utf-8') as fh:
+        m = re.search('version = b"([^"]+)"', fh.read(), re.MULTILINE)
+
+        if not m:
+            raise Exception('could not parse %s' % p)
+
+        return m.group(1)
--- a/contrib/packaging/hgpackaging/wix.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/hgpackaging/wix.py	Tue Jan 21 13:14:51 2020 -0500
@@ -7,39 +7,60 @@
 
 # no-check-code because Python 3 native.
 
+import collections
 import os
 import pathlib
 import re
+import shutil
 import subprocess
-import tempfile
 import typing
+import uuid
 import xml.dom.minidom
 
 from .downloads import download_entry
-from .py2exe import build_py2exe
+from .py2exe import (
+    build_py2exe,
+    stage_install,
+)
 from .util import (
     extract_zip_to_directory,
+    process_install_rules,
     sign_with_signtool,
 )
 
 
-SUPPORT_WXS = [
-    ('contrib.wxs', r'contrib'),
-    ('dist.wxs', r'dist'),
-    ('doc.wxs', r'doc'),
-    ('help.wxs', r'mercurial\help'),
-    ('i18n.wxs', r'i18n'),
-    ('locale.wxs', r'mercurial\locale'),
-    ('templates.wxs', r'mercurial\templates'),
-]
-
-
 EXTRA_PACKAGES = {
     'distutils',
     'pygments',
 }
 
 
+EXTRA_INSTALL_RULES = [
+    ('contrib/packaging/wix/COPYING.rtf', 'COPYING.rtf'),
+    ('contrib/win32/mercurial.ini', 'hgrc.d/mercurial.rc'),
+]
+
+STAGING_REMOVE_FILES = [
+    # We use the RTF variant.
+    'copying.txt',
+]
+
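+# Start Menu shortcut metadata, keyed by the GUID-derived File Id that
+# make_files_xml() assigns to the corresponding staged file.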
+SHORTCUTS = {
+    # hg.1.html
+    'hg.file.5d3e441c_28d9_5542_afd0_cdd4234f12d5': {
+        'Name': 'Mercurial Command Reference',
+    },
+    # hgignore.5.html
+    'hg.file.5757d8e0_f207_5e10_a2ec_3ba0a062f431': {
+        'Name': 'Mercurial Ignore Files',
+    },
+    # hgrc.5.html
+    'hg.file.92e605fd_1d1a_5dc6_9fc0_5d2998eb8f5e': {
+        'Name': 'Mercurial Configuration Files',
+    },
+}
+
+
 def find_version(source_dir: pathlib.Path):
     version_py = source_dir / 'mercurial' / '__version__.py'
 
@@ -148,49 +169,165 @@
     return post_build_sign
 
 
-LIBRARIES_XML = '''
-<?xml version="1.0" encoding="utf-8"?>
-<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
-
-  <?include {wix_dir}/guids.wxi ?>
-  <?include {wix_dir}/defines.wxi ?>
+def make_files_xml(staging_dir: pathlib.Path, is_x64) -> str:
+    """Create XML string listing every file to be installed."""
 
-  <Fragment>
-    <DirectoryRef Id="INSTALLDIR" FileSource="$(var.SourceDir)">
-      <Directory Id="libdir" Name="lib" FileSource="$(var.SourceDir)/lib">
-        <Component Id="libOutput" Guid="$(var.lib.guid)" Win64='$(var.IsX64)'>
-        </Component>
-      </Directory>
-    </DirectoryRef>
-  </Fragment>
-</Wix>
-'''.lstrip()
+    # We derive GUIDs from a deterministic file path identifier.
+    # We shoehorn the name into something that looks like a URL because
+    # the UUID namespaces are supposed to work that way (even though
+    # the input data probably is never validated).
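+    # e.g. uuid.uuid5(uuid.NAMESPACE_URL,
+    #   'https://www.mercurial-scm.org/wix-installer/0/doc/hg.1.html')
+    # returns the same GUID on every build, keeping file and component
+    # GUIDs stable across releases.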
 
-
-def make_libraries_xml(wix_dir: pathlib.Path, dist_dir: pathlib.Path):
-    """Make XML data for library components WXS."""
-    # We can't use ElementTree because it doesn't handle the
-    # <?include ?> directives.
     doc = xml.dom.minidom.parseString(
-        LIBRARIES_XML.format(wix_dir=str(wix_dir))
+        '<?xml version="1.0" encoding="utf-8"?>'
+        '<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">'
+        '</Wix>'
     )
 
-    component = doc.getElementsByTagName('Component')[0]
+    # Assemble the install layout by directory. This makes it easier to
+    # emit XML, since each directory has separate entities.
+    manifest = collections.defaultdict(dict)
+
+    for root, dirs, files in os.walk(staging_dir):
+        dirs.sort()
+
+        root = pathlib.Path(root)
+        rel_dir = root.relative_to(staging_dir)
+
+        for i in range(len(rel_dir.parts)):
+            parent = '/'.join(rel_dir.parts[0 : i + 1])
+            manifest.setdefault(parent, {})
+
+        for f in sorted(files):
+            full = root / f
+            manifest[str(rel_dir).replace('\\', '/')][full.name] = full
+
+    component_groups = collections.defaultdict(list)
+
+    # Now emit a <Fragment> for each directory.
+    # Each directory is composed of a <DirectoryRef> pointing to its parent
+    # and defines child <Directory> elements plus one <Component> per file.
+    for dir_name, entries in sorted(manifest.items()):
+        # The directory id is derived from the path. But the root directory
+        # is special.
+        if dir_name == '.':
+            parent_directory_id = 'INSTALLDIR'
+        else:
+            parent_directory_id = 'hg.dir.%s' % dir_name.replace('/', '.')
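+        # e.g. a staged 'templates/static' directory maps to the
+        # DirectoryRef Id 'hg.dir.templates.static'.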
 
-    f = doc.createElement('File')
-    f.setAttribute('Name', 'library.zip')
-    f.setAttribute('KeyPath', 'yes')
-    component.appendChild(f)
+        fragment = doc.createElement('Fragment')
+        directory_ref = doc.createElement('DirectoryRef')
+        directory_ref.setAttribute('Id', parent_directory_id)
+
+        # Add <Directory> entries for immediate children directories.
+        for possible_child in sorted(manifest.keys()):
+            if (
+                dir_name == '.'
+                and '/' not in possible_child
+                and possible_child != '.'
+            ):
+                child_directory_id = 'hg.dir.%s' % possible_child
+                name = possible_child
+            else:
+                if not possible_child.startswith('%s/' % dir_name):
+                    continue
+                name = possible_child[len(dir_name) + 1 :]
+                if '/' in name:
+                    continue
+
+                child_directory_id = 'hg.dir.%s' % possible_child.replace(
+                    '/', '.'
+                )
+
+            directory = doc.createElement('Directory')
+            directory.setAttribute('Id', child_directory_id)
+            directory.setAttribute('Name', name)
+            directory_ref.appendChild(directory)
+
+        # Add <Component>s for files in this directory.
+        for rel, source_path in sorted(entries.items()):
+            if dir_name == '.':
+                full_rel = rel
+            else:
+                full_rel = '%s/%s' % (dir_name, rel)
 
-    lib_dir = dist_dir / 'lib'
+            component_unique_id = (
+                'https://www.mercurial-scm.org/wix-installer/0/component/%s'
+                % full_rel
+            )
+            component_guid = uuid.uuid5(uuid.NAMESPACE_URL, component_unique_id)
+            component_id = 'hg.component.%s' % str(component_guid).replace(
+                '-', '_'
+            )
+
+            component = doc.createElement('Component')
+
+            component.setAttribute('Id', component_id)
+            component.setAttribute('Guid', str(component_guid).upper())
+            component.setAttribute('Win64', 'yes' if is_x64 else 'no')
+
+            # Assign this component to a top-level group.
+            if dir_name == '.':
+                component_groups['ROOT'].append(component_id)
+            elif '/' in dir_name:
+                component_groups[dir_name[0 : dir_name.index('/')]].append(
+                    component_id
+                )
+            else:
+                component_groups[dir_name].append(component_id)
+
+            unique_id = (
+                'https://www.mercurial-scm.org/wix-installer/0/%s' % full_rel
+            )
+            file_guid = uuid.uuid5(uuid.NAMESPACE_URL, unique_id)
+
+            # IDs have length limits, so derive them from the GUID.
+            file_guid_normalized = str(file_guid).replace('-', '_')
+            file_id = 'hg.file.%s' % file_guid_normalized
 
-    for p in sorted(lib_dir.iterdir()):
-        if not p.name.endswith(('.dll', '.pyd')):
-            continue
+            file_element = doc.createElement('File')
+            file_element.setAttribute('Id', file_id)
+            file_element.setAttribute('Source', str(source_path))
+            file_element.setAttribute('KeyPath', 'yes')
+            file_element.setAttribute('ReadOnly', 'yes')
+
+            component.appendChild(file_element)
+            directory_ref.appendChild(component)
+
+        fragment.appendChild(directory_ref)
+        doc.documentElement.appendChild(fragment)
+
+    for group, component_ids in sorted(component_groups.items()):
+        fragment = doc.createElement('Fragment')
+        component_group = doc.createElement('ComponentGroup')
+        component_group.setAttribute('Id', 'hg.group.%s' % group)
+
+        for component_id in component_ids:
+            component_ref = doc.createElement('ComponentRef')
+            component_ref.setAttribute('Id', component_id)
+            component_group.appendChild(component_ref)
 
-        f = doc.createElement('File')
-        f.setAttribute('Name', p.name)
-        component.appendChild(f)
+        fragment.appendChild(component_group)
+        doc.documentElement.appendChild(fragment)
+
+    # Add <Shortcut> to files that have it defined.
+    for file_id, metadata in sorted(SHORTCUTS.items()):
+        els = doc.getElementsByTagName('File')
+        els = [el for el in els if el.getAttribute('Id') == file_id]
+
+        if not els:
+            raise Exception('could not find File[Id=%s]' % file_id)
+
+        for el in els:
+            shortcut = doc.createElement('Shortcut')
+            shortcut.setAttribute('Id', 'hg.shortcut.%s' % file_id)
+            shortcut.setAttribute('Directory', 'ProgramMenuDir')
+            shortcut.setAttribute('Icon', 'hgIcon.ico')
+            shortcut.setAttribute('IconIndex', '0')
+            shortcut.setAttribute('Advertise', 'yes')
+            for k, v in sorted(metadata.items()):
+                shortcut.setAttribute(k, v)
+
+            el.appendChild(shortcut)
 
     return doc.toprettyxml()
 
@@ -249,9 +386,27 @@
         post_build_fn(source_dir, hg_build_dir, dist_dir, version)
 
     build_dir = hg_build_dir / ('wix-%s' % arch)
+    staging_dir = build_dir / 'stage'
 
     build_dir.mkdir(exist_ok=True)
 
+    # Purge the staging directory for every build so packaging is pristine.
+    if staging_dir.exists():
+        print('purging %s' % staging_dir)
+        shutil.rmtree(staging_dir)
+
+    stage_install(source_dir, staging_dir, lower_case=True)
+
+    # We also install some extra files.
+    process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir)
+
+    # And remove some files we don't want.
+    for f in STAGING_REMOVE_FILES:
+        p = staging_dir / f
+        if p.exists():
+            print('removing %s' % p)
+            p.unlink()
+
     wix_pkg, wix_entry = download_entry('wix', hg_build_dir)
     wix_path = hg_build_dir / ('wix-%s' % wix_entry['version'])
 
@@ -264,25 +419,16 @@
 
     defines = {'Platform': arch}
 
-    for wxs, rel_path in SUPPORT_WXS:
-        wxs = wix_dir / wxs
-        wxs_source_dir = source_dir / rel_path
-        run_candle(wix_path, build_dir, wxs, wxs_source_dir, defines=defines)
+    # Derive a .wxs file with the staged files.
+    manifest_wxs = build_dir / 'stage.wxs'
+    with manifest_wxs.open('w', encoding='utf-8') as fh:
+        fh.write(make_files_xml(staging_dir, is_x64=arch == 'x64'))
+
+    run_candle(wix_path, build_dir, manifest_wxs, staging_dir, defines=defines)
 
     for source, rel_path in sorted((extra_wxs or {}).items()):
         run_candle(wix_path, build_dir, source, rel_path, defines=defines)
 
-    # candle.exe doesn't like when we have an open handle on the file.
-    # So use TemporaryDirectory() instead of NamedTemporaryFile().
-    with tempfile.TemporaryDirectory() as td:
-        td = pathlib.Path(td)
-
-        tf = td / 'library.wxs'
-        with tf.open('w') as fh:
-            fh.write(make_libraries_xml(wix_dir, dist_dir))
-
-        run_candle(wix_path, build_dir, tf, dist_dir, defines=defines)
-
     source = wix_dir / 'mercurial.wxs'
     defines['Version'] = version
     defines['Comments'] = 'Installs Mercurial version %s' % version
@@ -308,20 +454,13 @@
         str(msi_path),
     ]
 
-    for source, rel_path in SUPPORT_WXS:
-        assert source.endswith('.wxs')
-        args.append(str(build_dir / ('%s.wixobj' % source[:-4])))
-
     for source, rel_path in sorted((extra_wxs or {}).items()):
         assert source.endswith('.wxs')
         source = os.path.basename(source)
         args.append(str(build_dir / ('%s.wixobj' % source[:-4])))
 
     args.extend(
-        [
-            str(build_dir / 'library.wixobj'),
-            str(build_dir / 'mercurial.wixobj'),
-        ]
+        [str(build_dir / 'stage.wixobj'), str(build_dir / 'mercurial.wixobj'),]
     )
 
     subprocess.run(args, cwd=str(source_dir), check=True)
--- a/contrib/packaging/inno/build.py	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,60 +0,0 @@
-#!/usr/bin/env python3
-# build.py - Inno installer build script.
-#
-# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-# This script automates the building of the Inno MSI installer for Mercurial.
-
-# no-check-code because Python 3 native.
-
-import argparse
-import os
-import pathlib
-import sys
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument(
-        '--python', required=True, help='path to python.exe to use'
-    )
-    parser.add_argument('--iscc', help='path to iscc.exe to use')
-    parser.add_argument(
-        '--version',
-        help='Mercurial version string to use '
-        '(detected from __version__.py if not defined',
-    )
-
-    args = parser.parse_args()
-
-    if not os.path.isabs(args.python):
-        raise Exception('--python arg must be an absolute path')
-
-    if args.iscc:
-        iscc = pathlib.Path(args.iscc)
-    else:
-        iscc = (
-            pathlib.Path(os.environ['ProgramFiles(x86)'])
-            / 'Inno Setup 5'
-            / 'ISCC.exe'
-        )
-
-    here = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
-    source_dir = here.parent.parent.parent
-    build_dir = source_dir / 'build'
-
-    sys.path.insert(0, str(source_dir / 'contrib' / 'packaging'))
-
-    from hgpackaging.inno import build
-
-    build(
-        source_dir,
-        build_dir,
-        pathlib.Path(args.python),
-        iscc,
-        version=args.version,
-    )
--- a/contrib/packaging/inno/mercurial.iss	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/inno/mercurial.iss	Tue Jan 21 13:14:51 2020 -0500
@@ -1,21 +1,6 @@
 ; Script generated by the Inno Setup Script Wizard.
 ; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!
 
-#ifndef VERSION
-#define FileHandle
-#define FileLine
-#define VERSION = "unknown"
-#if FileHandle = FileOpen(SourcePath + "\..\..\..\mercurial\__version__.py")
-  #expr FileLine = FileRead(FileHandle)
-  #expr FileLine = FileRead(FileHandle)
-  #define VERSION = Copy(FileLine, Pos('"', FileLine)+1, Len(FileLine)-Pos('"', FileLine)-1)
-#endif
-#if FileHandle
-  #expr FileClose(FileHandle)
-#endif
-#pragma message "Detected Version: " + VERSION
-#endif
-
 #ifndef ARCH
 #define ARCH = "x86"
 #endif
@@ -33,68 +18,40 @@
 AppVerName=Mercurial {#VERSION}
 OutputBaseFilename=Mercurial-{#VERSION}
 #endif
-InfoAfterFile=contrib/win32/postinstall.txt
-LicenseFile=COPYING
+InfoAfterFile=../postinstall.txt
+LicenseFile=Copying.txt
 ShowLanguageDialog=yes
 AppPublisher=Matt Mackall and others
 AppPublisherURL=https://mercurial-scm.org/
 AppSupportURL=https://mercurial-scm.org/
 AppUpdatesURL=https://mercurial-scm.org/
-AppID={{4B95A5F1-EF59-4B08-BED8-C891C46121B3}
+{{ 'AppID={{4B95A5F1-EF59-4B08-BED8-C891C46121B3}' }}
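+; The line above is a Jinja string expression so that the doubled opening
+; brace required by Inno Setup's AppID escaping survives template rendering.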
 AppContact=mercurial@mercurial-scm.org
 DefaultDirName={pf}\Mercurial
-SourceDir=..\..\..
+SourceDir=stage
 VersionInfoDescription=Mercurial distributed SCM (version {#VERSION})
 VersionInfoCopyright=Copyright 2005-2019 Matt Mackall and others
 VersionInfoCompany=Matt Mackall and others
 InternalCompressLevel=max
 SolidCompression=true
-SetupIconFile=contrib\win32\mercurial.ico
+SetupIconFile=../mercurial.ico
 AllowNoIcons=true
 DefaultGroupName=Mercurial
 PrivilegesRequired=none
 ChangesEnvironment=true
 
 [Files]
-Source: contrib\mercurial.el; DestDir: {app}/Contrib
-Source: contrib\vim\*.*; DestDir: {app}/Contrib/Vim
-Source: contrib\zsh_completion; DestDir: {app}/Contrib
-Source: contrib\bash_completion; DestDir: {app}/Contrib
-Source: contrib\tcsh_completion; DestDir: {app}/Contrib
-Source: contrib\tcsh_completion_build.sh; DestDir: {app}/Contrib
-Source: contrib\hgk; DestDir: {app}/Contrib; DestName: hgk.tcl
-Source: contrib\xml.rnc; DestDir: {app}/Contrib
-Source: contrib\mercurial.el; DestDir: {app}/Contrib
-Source: contrib\mq.el; DestDir: {app}/Contrib
-Source: contrib\hgweb.fcgi; DestDir: {app}/Contrib
-Source: contrib\hgweb.wsgi; DestDir: {app}/Contrib
-Source: contrib\win32\ReadMe.html; DestDir: {app}; Flags: isreadme
-Source: contrib\win32\postinstall.txt; DestDir: {app}; DestName: ReleaseNotes.txt
-Source: dist\hg.exe; DestDir: {app}; AfterInstall: Touch('{app}\hg.exe.local')
-Source: dist\lib\*.dll; Destdir: {app}\lib
-Source: dist\lib\*.pyd; Destdir: {app}\lib
-Source: dist\python*.dll; Destdir: {app}; Flags: skipifsourcedoesntexist
-Source: dist\msvc*.dll; DestDir: {app}; Flags: skipifsourcedoesntexist
-Source: dist\Microsoft.VC*.CRT.manifest; DestDir: {app}; Flags: skipifsourcedoesntexist
-Source: dist\lib\library.zip; DestDir: {app}\lib
-Source: doc\*.html; DestDir: {app}\Docs
-Source: doc\style.css; DestDir: {app}\Docs
-Source: mercurial\help\*.txt; DestDir: {app}\help
-Source: mercurial\help\internals\*.txt; DestDir: {app}\help\internals
-Source: mercurial\default.d\*.rc; DestDir: {app}\default.d
-Source: mercurial\locale\*.*; DestDir: {app}\locale; Flags: recursesubdirs createallsubdirs skipifsourcedoesntexist
-Source: mercurial\templates\*.*; DestDir: {app}\Templates; Flags: recursesubdirs createallsubdirs
-Source: CONTRIBUTORS; DestDir: {app}; DestName: Contributors.txt
-Source: COPYING; DestDir: {app}; DestName: Copying.txt
+{% for entry in package_files -%}
+Source: {{ entry.source }}; DestDir: {{ entry.dest_dir }}
+{%- if entry.metadata %}; {{ entry.metadata }}{% endif %}
+{% endfor %}
 
 [INI]
 Filename: {app}\Mercurial.url; Section: InternetShortcut; Key: URL; String: https://mercurial-scm.org/
-Filename: {app}\default.d\editor.rc; Section: ui; Key: editor; String: notepad
 
 [UninstallDelete]
 Type: files; Name: {app}\Mercurial.url
-Type: filesandordirs; Name: {app}\default.d
-Type: files; Name: "{app}\hg.exe.local"
+Type: filesandordirs; Name: {app}\hgrc.d
 
 [Icons]
 Name: {group}\Uninstall Mercurial; Filename: {uninstallexe}
@@ -121,4 +78,5 @@
     setArrayLength(Result, 1)
     Result[0] := ExpandConstant('{app}');
 end;
-#include "modpath.iss"
+
+{% include 'modpath.iss' %}
--- a/contrib/packaging/inno/modpath.iss	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/inno/modpath.iss	Tue Jan 21 13:14:51 2020 -0500
@@ -68,79 +68,42 @@
 	for d := 0 to GetArrayLength(pathdir)-1 do begin
 		updatepath := true;
 
-		// Modify WinNT path
-		if UsingWinNT() = true then begin
-
-			// Get current path, split into an array
-			RegQueryStringValue(regroot, regpath, 'Path', oldpath);
-			oldpath := oldpath + ';';
-			i := 0;
-
-			while (Pos(';', oldpath) > 0) do begin
-				SetArrayLength(pathArr, i+1);
-				pathArr[i] := Copy(oldpath, 0, Pos(';', oldpath)-1);
-				oldpath := Copy(oldpath, Pos(';', oldpath)+1, Length(oldpath));
-				i := i + 1;
+		// Get current path, split into an array
+		RegQueryStringValue(regroot, regpath, 'Path', oldpath);
+		oldpath := oldpath + ';';
+		i := 0;
 
-				// Check if current directory matches app dir
-				if pathdir[d] = pathArr[i-1] then begin
-					// if uninstalling, remove dir from path
-					if IsUninstaller() = true then begin
-						continue;
-					// if installing, flag that dir already exists in path
-					end else begin
-						updatepath := false;
-					end;
-				end;
+		while (Pos(';', oldpath) > 0) do begin
+			SetArrayLength(pathArr, i+1);
+			pathArr[i] := Copy(oldpath, 0, Pos(';', oldpath)-1);
+			oldpath := Copy(oldpath, Pos(';', oldpath)+1, Length(oldpath));
+			i := i + 1;
 
-				// Add current directory to new path
-				if i = 1 then begin
-					newpath := pathArr[i-1];
+			// Check if current directory matches app dir
+			if pathdir[d] = pathArr[i-1] then begin
+				// if uninstalling, remove dir from path
+				if IsUninstaller() = true then begin
+					continue;
+				// if installing, flag that dir already exists in path
 				end else begin
-					newpath := newpath + ';' + pathArr[i-1];
+					updatepath := false;
 				end;
 			end;
 
-			// Append app dir to path if not already included
-			if (IsUninstaller() = false) AND (updatepath = true) then
-				newpath := newpath + ';' + pathdir[d];
-
-			// Write new path
-			RegWriteStringValue(regroot, regpath, 'Path', newpath);
-
-		// Modify Win9x path
-		end else begin
-
-			// Convert to shortened dirname
-			pathdir[d] := GetShortName(pathdir[d]);
-
-			// If autoexec.bat exists, check if app dir already exists in path
-			aExecFile := 'C:\AUTOEXEC.BAT';
-			if FileExists(aExecFile) then begin
-				LoadStringsFromFile(aExecFile, aExecArr);
-				for i := 0 to GetArrayLength(aExecArr)-1 do begin
-					if IsUninstaller() = false then begin
-						// If app dir already exists while installing, skip add
-						if (Pos(pathdir[d], aExecArr[i]) > 0) then
-							updatepath := false;
-							break;
-					end else begin
-						// If app dir exists and = what we originally set, then delete at uninstall
-						if aExecArr[i] = 'SET PATH=%PATH%;' + pathdir[d] then
-							aExecArr[i] := '';
-					end;
-				end;
-			end;
-
-			// If app dir not found, or autoexec.bat didn't exist, then (create and) append to current path
-			if (IsUninstaller() = false) AND (updatepath = true) then begin
-				SaveStringToFile(aExecFile, #13#10 + 'SET PATH=%PATH%;' + pathdir[d], True);
-
-			// If uninstalling, write the full autoexec out
+			// Add current directory to new path
+			if i = 1 then begin
+				newpath := pathArr[i-1];
 			end else begin
-				SaveStringsToFile(aExecFile, aExecArr, False);
+				newpath := newpath + ';' + pathArr[i-1];
 			end;
 		end;
+
+		// Append app dir to path if not already included
+		if (IsUninstaller() = false) AND (updatepath = true) then
+			newpath := newpath + ';' + pathdir[d];
+
+		// Write new path
+		RegWriteStringValue(regroot, regpath, 'Path', newpath);
 	end;
 end;
 
@@ -207,13 +170,6 @@
 end;
 
 function NeedRestart(): Boolean;
-var
-	taskname:	String;
 begin
-	taskname := ModPathName;
-	if IsTaskSelected(taskname) and not UsingWinNT() then begin
-		Result := True;
-	end else begin
-		Result := False;
-	end;
+	Result := False;
 end;
--- a/contrib/packaging/inno/readme.rst	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/inno/readme.rst	Tue Jan 21 13:14:51 2020 -0500
@@ -11,12 +11,12 @@
 * Inno Setup (http://jrsoftware.org/isdl.php) version 5.4 or newer.
   Be sure to install the optional Inno Setup Preprocessor feature,
   which is required.
-* Python 3.5+ (to run the ``build.py`` script)
+* Python 3.5+ (to run the ``packaging.py`` script)
 
 Building
 ========
 
-The ``build.py`` script automates the process of producing an
+The ``packaging.py`` script automates the process of producing an
 Inno installer. It manages fetching and configuring the
 non-system dependencies (such as py2exe, gettext, and various
 Python packages).
@@ -31,11 +31,11 @@
 From the prompt, change to the Mercurial source directory. e.g.
 ``cd c:\src\hg``.
 
-Next, invoke ``build.py`` to produce an Inno installer. You will
+Next, invoke ``packaging.py`` to produce an Inno installer. You will
 need to supply the path to the Python interpreter to use.::
 
-   $ python3.exe contrib\packaging\inno\build.py \
-       --python c:\python27\python.exe
+   $ python3.exe contrib\packaging\packaging.py \
+       inno --python c:\python27\python.exe
 
 .. note::
 
@@ -49,13 +49,13 @@
 and an installer placed in the ``dist`` sub-directory. The final
 line of output should print the name of the generated installer.
 
-Additional options may be configured. Run ``build.py --help`` to
-see a list of program flags.
+Additional options may be configured. Run
+``packaging.py inno --help`` to see a list of program flags.
 
 MinGW
 =====
 
 It is theoretically possible to generate an installer that uses
-MinGW. This isn't well tested and ``build.py`` and may properly
+MinGW. This isn't well tested and ``packaging.py`` may not properly
 support it. See old versions of this file in version control for
 potentially useful hints as to how to achieve this.
--- a/contrib/packaging/inno/requirements.txt	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/inno/requirements.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -8,65 +8,6 @@
     --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \
     --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \
     # via dulwich
-cffi==1.13.1 \
-    --hash=sha256:00d890313797d9fe4420506613384b43099ad7d2b905c0752dbcc3a6f14d80fa \
-    --hash=sha256:0cf9e550ac6c5e57b713437e2f4ac2d7fd0cd10336525a27224f5fc1ec2ee59a \
-    --hash=sha256:0ea23c9c0cdd6778146a50d867d6405693ac3b80a68829966c98dd5e1bbae400 \
-    --hash=sha256:193697c2918ecdb3865acf6557cddf5076bb39f1f654975e087b67efdff83365 \
-    --hash=sha256:1ae14b542bf3b35e5229439c35653d2ef7d8316c1fffb980f9b7647e544baa98 \
-    --hash=sha256:1e389e069450609c6ffa37f21f40cce36f9be7643bbe5051ab1de99d5a779526 \
-    --hash=sha256:263242b6ace7f9cd4ea401428d2d45066b49a700852334fd55311bde36dcda14 \
-    --hash=sha256:33142ae9807665fa6511cfa9857132b2c3ee6ddffb012b3f0933fc11e1e830d5 \
-    --hash=sha256:364f8404034ae1b232335d8c7f7b57deac566f148f7222cef78cf8ae28ef764e \
-    --hash=sha256:47368f69fe6529f8f49a5d146ddee713fc9057e31d61e8b6dc86a6a5e38cecc1 \
-    --hash=sha256:4895640844f17bec32943995dc8c96989226974dfeb9dd121cc45d36e0d0c434 \
-    --hash=sha256:558b3afef987cf4b17abd849e7bedf64ee12b28175d564d05b628a0f9355599b \
-    --hash=sha256:5ba86e1d80d458b338bda676fd9f9d68cb4e7a03819632969cf6d46b01a26730 \
-    --hash=sha256:63424daa6955e6b4c70dc2755897f5be1d719eabe71b2625948b222775ed5c43 \
-    --hash=sha256:6381a7d8b1ebd0bc27c3bc85bc1bfadbb6e6f756b4d4db0aa1425c3719ba26b4 \
-    --hash=sha256:6381ab708158c4e1639da1f2a7679a9bbe3e5a776fc6d1fd808076f0e3145331 \
-    --hash=sha256:6fd58366747debfa5e6163ada468a90788411f10c92597d3b0a912d07e580c36 \
-    --hash=sha256:728ec653964655d65408949b07f9b2219df78badd601d6c49e28d604efe40599 \
-    --hash=sha256:7cfcfda59ef1f95b9f729c56fe8a4041899f96b72685d36ef16a3440a0f85da8 \
-    --hash=sha256:819f8d5197c2684524637f940445c06e003c4a541f9983fd30d6deaa2a5487d8 \
-    --hash=sha256:825ecffd9574557590e3225560a8a9d751f6ffe4a49e3c40918c9969b93395fa \
-    --hash=sha256:9009e917d8f5ef780c2626e29b6bc126f4cb2a4d43ca67aa2b40f2a5d6385e78 \
-    --hash=sha256:9c77564a51d4d914ed5af096cd9843d90c45b784b511723bd46a8a9d09cf16fc \
-    --hash=sha256:a19089fa74ed19c4fe96502a291cfdb89223a9705b1d73b3005df4256976142e \
-    --hash=sha256:a40ed527bffa2b7ebe07acc5a3f782da072e262ca994b4f2085100b5a444bbb2 \
-    --hash=sha256:bb75ba21d5716abc41af16eac1145ab2e471deedde1f22c6f99bd9f995504df0 \
-    --hash=sha256:e22a00c0c81ffcecaf07c2bfb3672fa372c50e2bd1024ffee0da191c1b27fc71 \
-    --hash=sha256:e55b5a746fb77f10c83e8af081979351722f6ea48facea79d470b3731c7b2891 \
-    --hash=sha256:ec2fa3ee81707a5232bf2dfbd6623fdb278e070d596effc7e2d788f2ada71a05 \
-    --hash=sha256:fd82eb4694be712fcae03c717ca2e0fc720657ac226b80bbb597e971fc6928c2 \
-    # via cryptography
-configparser==4.0.2 \
-    --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
-    --hash=sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df \
-    # via entrypoints
-cryptography==2.8 \
-    --hash=sha256:02079a6addc7b5140ba0825f542c0869ff4df9a69c360e339ecead5baefa843c \
-    --hash=sha256:1df22371fbf2004c6f64e927668734070a8953362cd8370ddd336774d6743595 \
-    --hash=sha256:369d2346db5934345787451504853ad9d342d7f721ae82d098083e1f49a582ad \
-    --hash=sha256:3cda1f0ed8747339bbdf71b9f38ca74c7b592f24f65cdb3ab3765e4b02871651 \
-    --hash=sha256:44ff04138935882fef7c686878e1c8fd80a723161ad6a98da31e14b7553170c2 \
-    --hash=sha256:4b1030728872c59687badcca1e225a9103440e467c17d6d1730ab3d2d64bfeff \
-    --hash=sha256:58363dbd966afb4f89b3b11dfb8ff200058fbc3b947507675c19ceb46104b48d \
-    --hash=sha256:6ec280fb24d27e3d97aa731e16207d58bd8ae94ef6eab97249a2afe4ba643d42 \
-    --hash=sha256:7270a6c29199adc1297776937a05b59720e8a782531f1f122f2eb8467f9aab4d \
-    --hash=sha256:73fd30c57fa2d0a1d7a49c561c40c2f79c7d6c374cc7750e9ac7c99176f6428e \
-    --hash=sha256:7f09806ed4fbea8f51585231ba742b58cbcfbfe823ea197d8c89a5e433c7e912 \
-    --hash=sha256:90df0cc93e1f8d2fba8365fb59a858f51a11a394d64dbf3ef844f783844cc793 \
-    --hash=sha256:971221ed40f058f5662a604bd1ae6e4521d84e6cad0b7b170564cc34169c8f13 \
-    --hash=sha256:a518c153a2b5ed6b8cc03f7ae79d5ffad7315ad4569b2d5333a13c38d64bd8d7 \
-    --hash=sha256:b0de590a8b0979649ebeef8bb9f54394d3a41f66c5584fff4220901739b6b2f0 \
-    --hash=sha256:b43f53f29816ba1db8525f006fa6f49292e9b029554b3eb56a189a70f2a40879 \
-    --hash=sha256:d31402aad60ed889c7e57934a03477b572a03af7794fa8fb1780f21ea8f6551f \
-    --hash=sha256:de96157ec73458a7f14e3d26f17f8128c959084931e8997b9e655a39c8fde9f9 \
-    --hash=sha256:df6b4dca2e11865e6cfbfb708e800efb18370f5a46fd601d3755bc7f85b3a8a2 \
-    --hash=sha256:ecadccc7ba52193963c0475ac9f6fa28ac01e01349a2ca48509667ef41ffd2cf \
-    --hash=sha256:fb81c17e0ebe3358486cd8cc3ad78adbae58af12fc2bf2bc0bb84e8090fa5ce8 \
-    # via secretstorage
 docutils==0.15.2 \
     --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
     --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
@@ -85,35 +26,16 @@
     --hash=sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19 \
     --hash=sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451 \
     # via keyring
-enum34==1.1.6 \
-    --hash=sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850 \
-    --hash=sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a \
-    --hash=sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79 \
-    --hash=sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1 \
-    # via cryptography
-ipaddress==1.0.23 \
-    --hash=sha256:6e0f4a39e66cb5bb9a137b00276a2eff74f93b71dcbdad6f10ff7df9d3557fcc \
-    --hash=sha256:b7f8e0369580bb4a24d5ba1d7cc29660a4a6987763faf1d8a8046830e020e7e2 \
-    # via cryptography
 keyring==18.0.1 \
     --hash=sha256:67d6cc0132bd77922725fae9f18366bb314fd8f95ff4d323a4df41890a96a838 \
     --hash=sha256:7b29ebfcf8678c4da531b2478a912eea01e80007e5ddca9ee0c7038cb3489ec6
-pycparser==2.19 \
-    --hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \
-    # via cffi
 pygments==2.4.2 \
     --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
     --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297
 pywin32-ctypes==0.2.0 \
     --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
-    --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98
-secretstorage==2.3.1 \
-    --hash=sha256:3af65c87765323e6f64c83575b05393f9e003431959c9395d1791d51497f29b6 \
+    --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \
     # via keyring
-six==1.12.0 \
-    --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
-    --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
-    # via cryptography
 urllib3==1.25.6 \
     --hash=sha256:3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398 \
     --hash=sha256:9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86 \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/packaging/packaging.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+#
+# packaging.py - Mercurial packaging functionality
+#
+# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
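+# This script creates a virtualenv under build/venv-packaging, installs
+# the dependencies pinned in requirements.txt into it, then re-executes
+# itself inside that environment to run the hgpackaging CLI.
+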
+import os
+import pathlib
+import subprocess
+import sys
+import venv
+
+
+HERE = pathlib.Path(os.path.abspath(__file__)).parent
+REQUIREMENTS_TXT = HERE / "requirements.txt"
+SOURCE_DIR = HERE.parent.parent
+VENV = SOURCE_DIR / "build" / "venv-packaging"
+
+
+def bootstrap():
+    venv_created = not VENV.exists()
+
+    VENV.parent.mkdir(exist_ok=True)
+
+    venv.create(VENV, with_pip=True)
+
+    if os.name == "nt":
+        venv_bin = VENV / "Scripts"
+        pip = venv_bin / "pip.exe"
+        python = venv_bin / "python.exe"
+    else:
+        venv_bin = VENV / "bin"
+        pip = venv_bin / "pip"
+        python = venv_bin / "python"
+
+    args = [
+        str(pip),
+        "install",
+        "-r",
+        str(REQUIREMENTS_TXT),
+        "--disable-pip-version-check",
+    ]
+
+    if not venv_created:
+        args.append("-q")
+
+    subprocess.run(args, check=True)
+
+    os.environ["HGPACKAGING_BOOTSTRAPPED"] = "1"
+    os.environ["PATH"] = "%s%s%s" % (venv_bin, os.pathsep, os.environ["PATH"])
+
+    subprocess.run([str(python), __file__] + sys.argv[1:], check=True)
+
+
+def run():
+    import hgpackaging.cli as cli
+
+    # argparse inside cli.main() parses sys.argv[1:], so the interpreter
+    # and script name are already stripped off.
+    cli.main()
+
+
+if __name__ == "__main__":
+    try:
+        if "HGPACKAGING_BOOTSTRAPPED" not in os.environ:
+            bootstrap()
+        else:
+            run()
+    except subprocess.CalledProcessError as e:
+        sys.exit(e.returncode)
+    except KeyboardInterrupt:
+        sys.exit(1)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/packaging/requirements.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,39 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile --generate-hashes --output-file=contrib/packaging/requirements.txt contrib/packaging/requirements.txt.in
+#
+jinja2==2.10.3 \
+    --hash=sha256:74320bb91f31270f9551d46522e33af46a80c3d619f4a4bf42b3164d30b5911f \
+    --hash=sha256:9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de
+markupsafe==1.1.1 \
+    --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \
+    --hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \
+    --hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \
+    --hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \
+    --hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \
+    --hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \
+    --hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \
+    --hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \
+    --hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \
+    --hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \
+    --hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \
+    --hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \
+    --hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \
+    --hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \
+    --hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \
+    --hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \
+    --hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \
+    --hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \
+    --hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \
+    --hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \
+    --hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \
+    --hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \
+    --hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \
+    --hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \
+    --hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \
+    --hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \
+    --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \
+    --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \
+    # via jinja2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/packaging/requirements.txt.in	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,1 @@
+jinja2
--- a/contrib/packaging/wix/build.py	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,96 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-# no-check-code because Python 3 native.
-
-"""Code to build Mercurial WiX installer."""
-
-import argparse
-import os
-import pathlib
-import sys
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument('--name', help='Application name', default='Mercurial')
-    parser.add_argument(
-        '--python', help='Path to Python executable to use', required=True
-    )
-    parser.add_argument(
-        '--sign-sn',
-        help='Subject name (or fragment thereof) of certificate '
-        'to use for signing',
-    )
-    parser.add_argument(
-        '--sign-cert', help='Path to certificate to use for signing'
-    )
-    parser.add_argument(
-        '--sign-password', help='Password for signing certificate'
-    )
-    parser.add_argument(
-        '--sign-timestamp-url',
-        help='URL of timestamp server to use for signing',
-    )
-    parser.add_argument('--version', help='Version string to use')
-    parser.add_argument(
-        '--extra-packages-script',
-        help=(
-            'Script to execute to include extra packages in ' 'py2exe binary.'
-        ),
-    )
-    parser.add_argument(
-        '--extra-wxs', help='CSV of path_to_wxs_file=working_dir_for_wxs_file'
-    )
-    parser.add_argument(
-        '--extra-features',
-        help=(
-            'CSV of extra feature names to include '
-            'in the installer from the extra wxs files'
-        ),
-    )
-
-    args = parser.parse_args()
-
-    here = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
-    source_dir = here.parent.parent.parent
-
-    sys.path.insert(0, str(source_dir / 'contrib' / 'packaging'))
-
-    from hgpackaging.wix import (
-        build_installer,
-        build_signed_installer,
-    )
-
-    fn = build_installer
-    kwargs = {
-        'source_dir': source_dir,
-        'python_exe': pathlib.Path(args.python),
-        'version': args.version,
-    }
-
-    if not os.path.isabs(args.python):
-        raise Exception('--python arg must be an absolute path')
-
-    if args.extra_packages_script:
-        kwargs['extra_packages_script'] = args.extra_packages_script
-    if args.extra_wxs:
-        kwargs['extra_wxs'] = dict(
-            thing.split("=") for thing in args.extra_wxs.split(',')
-        )
-    if args.extra_features:
-        kwargs['extra_features'] = args.extra_features.split(',')
-
-    if args.sign_sn or args.sign_cert:
-        fn = build_signed_installer
-        kwargs['name'] = args.name
-        kwargs['subject_name'] = args.sign_sn
-        kwargs['cert_path'] = args.sign_cert
-        kwargs['cert_password'] = args.sign_password
-        kwargs['timestamp_url'] = args.sign_timestamp_url
-
-    fn(**kwargs)
--- a/contrib/packaging/wix/contrib.wxs	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
-
-  <?include guids.wxi ?>
-  <?include defines.wxi ?>
-
-  <Fragment>
-    <ComponentGroup Id="contribFolder">
-      <ComponentRef Id="contrib" />
-      <ComponentRef Id="contrib.vim" />
-    </ComponentGroup>
-  </Fragment>
-
-  <Fragment>
-    <DirectoryRef Id="INSTALLDIR">
-      <Directory Id="contribdir" Name="contrib" FileSource="$(var.SourceDir)">
-        <Component Id="contrib" Guid="$(var.contrib.guid)" Win64='$(var.IsX64)'>
-          <File Name="bash_completion" KeyPath="yes" />
-          <File Name="hgk" />
-          <File Name="hgweb.fcgi" />
-          <File Name="hgweb.wsgi" />
-          <File Name="logo-droplets.svg" />
-          <File Name="mercurial.el" />
-          <File Name="tcsh_completion" />
-          <File Name="tcsh_completion_build.sh" />
-          <File Name="xml.rnc" />
-          <File Name="zsh_completion" />
-        </Component>
-        <Directory Id="vimdir" Name="vim">
-          <Component Id="contrib.vim" Guid="$(var.contrib.vim.guid)" Win64='$(var.IsX64)'>
-            <File Name="hg-menu.vim" KeyPath="yes" />
-            <File Name="HGAnnotate.vim" />
-            <File Name="hgcommand.vim" />
-            <File Name="patchreview.txt" />
-            <File Name="patchreview.vim" />
-            <File Name="hgtest.vim" />
-          </Component>
-        </Directory>
-      </Directory>
-    </DirectoryRef>
-  </Fragment>
-
-</Wix>
--- a/contrib/packaging/wix/dist.wxs	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,15 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
-
-  <?include guids.wxi ?>
-  <?include defines.wxi ?>
-
-  <Fragment>
-    <DirectoryRef Id="INSTALLDIR" FileSource="$(var.SourceDir)">
-      <Component Id="distOutput" Guid="$(var.dist.guid)" Win64='$(var.IsX64)'>
-        <File Name="python27.dll" KeyPath="yes" />
-      </Component>
-    </DirectoryRef>
-  </Fragment>
-
-</Wix>
--- a/contrib/packaging/wix/doc.wxs	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
-
-  <?include guids.wxi ?>
-  <?include defines.wxi ?>
-
-  <Fragment>
-    <ComponentGroup Id="docFolder">
-      <ComponentRef Id="doc.hg.1.html" />
-      <ComponentRef Id="doc.hgignore.5.html" />
-      <ComponentRef Id="doc.hgrc.5.html" />
-      <ComponentRef Id="doc.style.css" />
-    </ComponentGroup>
-  </Fragment>
-
-  <Fragment>
-    <DirectoryRef Id="INSTALLDIR">
-      <Directory Id="docdir" Name="doc" FileSource="$(var.SourceDir)">
-        <Component Id="doc.hg.1.html" Guid="$(var.doc.hg.1.html.guid)" Win64='$(var.IsX64)'>
-          <File Name="hg.1.html" KeyPath="yes">
-            <Shortcut Id="hg1StartMenu" Directory="ProgramMenuDir"
-                      Name="Mercurial Command Reference"
-                      Icon="hgIcon.ico" IconIndex="0" Advertise="yes"
-            />
-          </File>
-        </Component>
-        <Component Id="doc.hgignore.5.html" Guid="$(var.doc.hgignore.5.html.guid)" Win64='$(var.IsX64)'>
-          <File Name="hgignore.5.html" KeyPath="yes">
-            <Shortcut Id="hgignore5StartMenu" Directory="ProgramMenuDir"
-                      Name="Mercurial Ignore Files"
-                      Icon="hgIcon.ico" IconIndex="0" Advertise="yes"
-            />
-          </File>
-        </Component>
-        <Component Id="doc.hgrc.5.html" Guid="$(var.doc.hgrc.5.html)" Win64='$(var.IsX64)'>
-          <File Name="hgrc.5.html" KeyPath="yes">
-            <Shortcut Id="hgrc5StartMenu" Directory="ProgramMenuDir"
-                      Name="Mercurial Configuration Files"
-                      Icon="hgIcon.ico" IconIndex="0" Advertise="yes"
-            />
-          </File>
-        </Component>
-        <Component Id="doc.style.css" Guid="$(var.doc.style.css)" Win64='$(var.IsX64)'>
-          <File Name="style.css" KeyPath="yes" />
-        </Component>
-      </Directory>
-    </DirectoryRef>
-  </Fragment>
-
-</Wix>
--- a/contrib/packaging/wix/guids.wxi	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/wix/guids.wxi	Tue Jan 21 13:14:51 2020 -0500
@@ -4,49 +4,9 @@
        and replace 'Mercurial' in this notice with the name of
        your project. Component GUIDs have global namespace!      -->
 
-  <!-- contrib.wxs -->
-  <?define contrib.guid = {4E11FFC2-E2F7-482A-8460-9394B5489F02} ?>
-  <?define contrib.vim.guid = {BB04903A-652D-4C4F-9590-2BD07A2304F2} ?>
-
-  <!-- dist.wxs -->
-  <?define dist.guid = {CE405FE6-CD1E-4873-9C9A-7683AE5A3D90} ?>
-  <?define lib.guid = {877633b5-0b7e-4b46-8f1c-224a61733297} ?>
-
-  <!-- doc.wxs -->
-  <?define doc.hg.1.html.guid = {AAAA3FDA-EDC5-4220-B59D-D342722358A2} ?>
-  <?define doc.hgignore.5.html.guid = {AA9118C4-F3A0-4429-A5F4-5A1906B2D67F} ?>
-  <?define doc.hgrc.5.html = {E0CEA1EB-FA01-408c-844B-EE5965165BAE} ?>
-  <?define doc.style.css = {172F8262-98E0-4711-BD39-4DAE0D77EF05} ?>
-
-  <!-- help.wxs -->
-  <?define help.root.guid = {9FA957DB-6DFE-44f2-AD03-293B2791CF17} ?>
-  <?define help.internals.guid = {2DD7669D-0DB8-4C39-9806-78E6475E7ACC} ?>
-
-  <!-- i18n.wxs -->
-  <?define i18nFolder.guid = {1BF8026D-CF7C-4174-AEE6-D6B7BF119248} ?>
-
-  <!-- templates.wxs -->
-  <?define templates.root.guid = {437FD55C-7756-4EA0-87E5-FDBE75DC8595} ?>
-  <?define templates.atom.guid = {D30E14A5-8AF0-4268-8B00-00BEE9E09E39} ?>
-  <?define templates.coal.guid = {B63CCAAB-4EAF-43b4-901E-4BD13F5B78FC} ?>
-  <?define templates.gitweb.guid = {827334AF-1EFD-421B-962C-5660A068F612} ?>
-  <?define templates.json.guid = {F535BE7A-EC34-46E0-B9BE-013F3DBAFB19} ?>
-  <?define templates.monoblue.guid = {8060A1E4-BD4C-453E-92CB-9536DC44A9E3} ?>
-  <?define templates.paper.guid = {61AB1DE9-645F-46ED-8AF8-0CF02267FFBB} ?>
-  <?define templates.raw.guid = {834DF8D7-9784-43A6-851D-A96CE1B3575B} ?>
-  <?define templates.rss.guid = {9338FA09-E128-4B1C-B723-1142DBD09E14} ?>
-  <?define templates.spartan.guid = {80222625-FA8F-44b1-86CE-1781EF375D09} ?>
-  <?define templates.static.guid = {6B3D7C24-98DA-4B67-9F18-35F77357B0B4} ?>
-
   <!-- mercurial.wxs -->
   <?define ProductUpgradeCode = {A1CC6134-E945-4399-BE36-EB0017FDF7CF} ?>
-
   <?define ComponentMainExecutableGUID = {D102B8FA-059B-4ACC-9FA3-8C78C3B58EEF} ?>
-
-  <?define ReadMe.guid = {56A8E372-991D-4DCA-B91D-93D775974CF5} ?>
-  <?define COPYING.guid = {B7801DBA-1C49-4BF4-91AD-33C65F5C7895} ?>
-  <?define mercurial.rc.guid = {1D5FAEEE-7E6E-43B1-9F7F-802714316B15} ?>
-  <?define mergetools.rc.guid = {E8A1DC29-FF40-4B5F-BD12-80B9F7BF0CCD} ?>
   <?define ProgramMenuDir.guid = {D5A63320-1238-489B-B68B-CF053E9577CA} ?>
 
 </Include>
--- a/contrib/packaging/wix/help.wxs	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
-
-  <?include guids.wxi ?>
-  <?include defines.wxi ?>
-
-  <Fragment>
-    <ComponentGroup Id='helpFolder'>
-      <ComponentRef Id='help.root' />
-      <ComponentRef Id='help.internals' />
-    </ComponentGroup>
-  </Fragment>
-
-  <Fragment>
-    <DirectoryRef Id="INSTALLDIR">
-      <Directory Id="helpdir" Name="help" FileSource="$(var.SourceDir)">
-        <Component Id="help.root" Guid="$(var.help.root.guid)" Win64='$(var.IsX64)'>
-          <File Name="bundlespec.txt" />
-          <File Name="color.txt" />
-          <File Name="config.txt" KeyPath="yes" />
-          <File Name="dates.txt" />
-          <File Name="deprecated.txt" />
-          <File Name="diffs.txt" />
-          <File Name="environment.txt" />
-          <File Name="extensions.txt" />
-          <File Name="filesets.txt" />
-          <File Name="flags.txt" />
-          <File Name="glossary.txt" />
-          <File Name="hgignore.txt" />
-          <File Name="hgweb.txt" />
-          <File Name="merge-tools.txt" />
-          <File Name="pager.txt" />
-          <File Name="patterns.txt" />
-          <File Name="phases.txt" />
-          <File Name="revisions.txt" />
-          <File Name="scripting.txt" />
-          <File Name="subrepos.txt" />
-          <File Name="templates.txt" />
-          <File Name="urls.txt" />
-        </Component>
-
-        <Directory Id="help.internaldir" Name="internals">
-          <Component Id="help.internals" Guid="$(var.help.internals.guid)" Win64='$(var.IsX64)'>
-            <File Id="internals.bundle2.txt"      Name="bundle2.txt" />
-            <File Id="internals.bundles.txt"      Name="bundles.txt" KeyPath="yes" />
-            <File Id="internals.cbor.txt"         Name="cbor.txt" />
-            <File Id="internals.censor.txt"       Name="censor.txt" />
-            <File Id="internals.changegroups.txt" Name="changegroups.txt" />
-            <File Id="internals.config.txt"       Name="config.txt" />
-            <File Id="internals.extensions.txt"   Name="extensions.txt" />
-            <File Id="internals.linelog.txt"      Name="linelog.txt" />
-            <File Id="internals.mergestate.txt"   Name="mergestate.txt" />
-            <File Id="internals.requirements.txt" Name="requirements.txt" />
-            <File Id="internals.revlogs.txt"      Name="revlogs.txt" />
-            <File Id="internals.wireprotocol.txt" Name="wireprotocol.txt" />
-            <File Id="internals.wireprotocolrpc.txt" Name="wireprotocolrpc.txt" />
-            <File Id="internals.wireprotocolv2.txt" Name="wireprotocolv2.txt" />
-          </Component>
-        </Directory>
-
-      </Directory>
-    </DirectoryRef>
-  </Fragment>
-
-</Wix>
--- a/contrib/packaging/wix/i18n.wxs	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
-
-  <?include guids.wxi ?>
-  <?include defines.wxi ?>
-
-  <?define hg_po_langs =
-    da;de;el;fr;it;ja;pt_BR;ro;ru;sv;zh_CN;zh_TW
-  ?>
-
-  <Fragment>
-    <DirectoryRef Id="INSTALLDIR">
-      <Directory Id="i18ndir" Name="i18n" FileSource="$(var.SourceDir)">
-        <Component Id="i18nFolder" Guid="$(var.i18nFolder.guid)" Win64='$(var.IsX64)'>
-          <File Name="hggettext" KeyPath="yes" />
-          <?foreach LANG in $(var.hg_po_langs) ?>
-            <File Id="hg.$(var.LANG).po"
-                  Name="$(var.LANG).po"
-            />
-          <?endforeach?>
-        </Component>
-      </Directory>
-    </DirectoryRef>
-  </Fragment>
-
-</Wix>
--- a/contrib/packaging/wix/locale.wxs	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
-
-  <?include defines.wxi ?>
-
-  <?define hglocales =
-    da;de;el;fr;it;ja;pt_BR;ro;ru;sv;zh_CN;zh_TW
-  ?>
-
-  <Fragment>
-    <ComponentGroup Id="localeFolder">
-      <?foreach LOC in $(var.hglocales) ?>
-        <ComponentRef Id="hg.locale.$(var.LOC)"/>
-      <?endforeach?>
-    </ComponentGroup>
-  </Fragment>
-
-  <Fragment>
-    <DirectoryRef Id="INSTALLDIR">
-      <Directory Id="localedir" Name="locale" FileSource="$(var.SourceDir)">
-        <?foreach LOC in $(var.hglocales) ?>
-          <Directory Id="hg.locale.$(var.LOC)" Name="$(var.LOC)">
-            <Directory Id="hg.locale.$(var.LOC).LC_MESSAGES" Name="LC_MESSAGES">
-              <Component Id="hg.locale.$(var.LOC)" Guid="*" Win64='$(var.IsX64)'>
-                <File Id="hg.mo.$(var.LOC)" Name="hg.mo" KeyPath="yes" />
-              </Component>
-            </Directory>
-          </Directory>
-        <?endforeach?>
-      </Directory>
-    </DirectoryRef>
-  </Fragment>
-
-</Wix>
--- a/contrib/packaging/wix/mercurial.wxs	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/wix/mercurial.wxs	Tue Jan 21 13:14:51 2020 -0500
@@ -60,30 +60,10 @@
       <Directory Id='$(var.PFolder)' Name='PFiles'>
         <Directory Id='INSTALLDIR' Name='Mercurial'>
           <Component Id='MainExecutable' Guid='$(var.ComponentMainExecutableGUID)' Win64='$(var.IsX64)'>
-            <File Id='hgEXE' Name='hg.exe' Source='dist\hg.exe' KeyPath='yes' />
+            <CreateFolder />
             <Environment Id="Environment" Name="PATH" Part="last" System="yes"
                          Permanent="no" Value="[INSTALLDIR]" Action="set" />
           </Component>
-          <Component Id='ReadMe' Guid='$(var.ReadMe.guid)' Win64='$(var.IsX64)'>
-              <File Id='ReadMe' Name='ReadMe.html' Source='contrib\win32\ReadMe.html'
-                    KeyPath='yes'/>
-          </Component>
-          <Component Id='COPYING' Guid='$(var.COPYING.guid)' Win64='$(var.IsX64)'>
-            <File Id='COPYING' Name='COPYING.rtf' Source='contrib\packaging\wix\COPYING.rtf'
-                  KeyPath='yes'/>
-          </Component>
-
-          <Directory Id='HGRCD' Name='hgrc.d'>
-            <Component Id='mercurial.rc' Guid='$(var.mercurial.rc.guid)' Win64='$(var.IsX64)'>
-              <File Id='mercurial.rc' Name='Mercurial.rc' Source='contrib\win32\mercurial.ini'
-                    ReadOnly='yes' KeyPath='yes'/>
-            </Component>
-            <Component Id='mergetools.rc' Guid='$(var.mergetools.rc.guid)' Win64='$(var.IsX64)'>
-              <File Id='mergetools.rc' Name='MergeTools.rc' Source='mercurial\default.d\mergetools.rc'
-                    ReadOnly='yes' KeyPath='yes'/>
-            </Component>
-          </Directory>
-
         </Directory>
       </Directory>
 
@@ -117,15 +97,12 @@
       <Feature Id='MainProgram' Title='Program' Description='Mercurial command line app'
              Level='1' Absent='disallow' >
         <ComponentRef Id='MainExecutable' />
-        <ComponentRef Id='distOutput' />
-        <ComponentRef Id='libOutput' />
         <ComponentRef Id='ProgramMenuDir' />
-        <ComponentRef Id='ReadMe' />
-        <ComponentRef Id='COPYING' />
-        <ComponentRef Id='mercurial.rc' />
-        <ComponentRef Id='mergetools.rc' />
-        <ComponentGroupRef Id='helpFolder' />
-        <ComponentGroupRef Id='templatesFolder' />
+        <ComponentGroupRef Id="hg.group.ROOT" />
+        <ComponentGroupRef Id="hg.group.hgrc.d" />
+        <ComponentGroupRef Id="hg.group.helptext" />
+        <ComponentGroupRef Id="hg.group.lib" />
+        <ComponentGroupRef Id="hg.group.templates" />
         <MergeRef Id='VCRuntime' />
         <MergeRef Id='VCRuntimePolicy' />
       </Feature>
@@ -135,14 +112,13 @@
         <?endforeach?>
       <?endif?>
       <Feature Id='Locales' Title='Translations' Description='Translations' Level='1'>
-        <ComponentGroupRef Id='localeFolder' />
-        <ComponentRef Id='i18nFolder' />
+        <ComponentGroupRef Id="hg.group.locale" />
       </Feature>
       <Feature Id='Documentation' Title='Documentation' Description='HTML man pages' Level='1'>
-        <ComponentGroupRef Id='docFolder' />
+        <ComponentGroupRef Id="hg.group.doc" />
       </Feature>
       <Feature Id='Misc' Title='Miscellaneous' Description='Contributed scripts' Level='1'>
-        <ComponentGroupRef Id='contribFolder' />
+        <ComponentGroupRef Id="hg.group.contrib" />
       </Feature>
     </Feature>
 
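The mercurial.wxs hunks above replace the hand-enumerated WiX fragments (and their manually assigned GUIDs in guids.wxi) with generated hg.group.* component groups, which is why help.wxs, i18n.wxs, locale.wxs, and templates.wxs are deleted outright. A hypothetical sketch of the naming scheme, purely for illustration and not the actual packaging.py generator:

    import os

    def component_group_ids(staging_dir):
        # hypothetical: one group for files at the staging root...
        yield "hg.group.ROOT"
        # ...and one per staged directory (helptext, lib, templates,
        # hgrc.d, locale, doc, contrib), matching the ids referenced above
        for entry in sorted(os.listdir(staging_dir)):
            if os.path.isdir(os.path.join(staging_dir, entry)):
                yield "hg.group.%s" % entry
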
--- a/contrib/packaging/wix/readme.rst	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/wix/readme.rst	Tue Jan 21 13:14:51 2020 -0500
@@ -18,12 +18,12 @@
 * Python 2.7 (download from https://www.python.org/downloads/)
 * Microsoft Visual C++ Compiler for Python 2.7
   (https://www.microsoft.com/en-us/download/details.aspx?id=44266)
-* Python 3.5+ (to run the ``build.py`` script)
+* Python 3.5+ (to run the ``packaging.py`` script)
 
 Building
 ========
 
-The ``build.py`` script automates the process of producing an MSI
+The ``packaging.py`` script automates the process of producing an MSI
 installer. It manages fetching and configuring non-system dependencies
 (such as py2exe, gettext, and various Python packages).
 
@@ -37,11 +37,11 @@
 From the prompt, change to the Mercurial source directory. e.g.
 ``cd c:\src\hg``.
 
-Next, invoke ``build.py`` to produce an MSI installer. You will need
+Next, invoke ``packaging.py`` to produce an MSI installer. You will need
 to supply the path to the Python interpreter to use::
 
-   $ python3 contrib\packaging\wix\build.py \
-      --python c:\python27\python.exe
+   $ python3 contrib\packaging\packaging.py \
+      wix --python c:\python27\python.exe
 
 .. note::
 
@@ -54,8 +54,8 @@
 and an installer placed in the ``dist`` sub-directory. The final line
 of output should print the name of the generated installer.
 
-Additional options may be configured. Run ``build.py --help`` to see
-a list of program flags.
+Additional options may be configured. Run ``packaging.py wix --help`` to
+see a list of program flags.
 
 Relationship to TortoiseHG
 ==========================
--- a/contrib/packaging/wix/requirements.txt	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/wix/requirements.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -1,13 +1,13 @@
-#
-# This file is autogenerated by pip-compile
-# To update, run:
-#
-#    pip-compile --generate-hashes --output-file=contrib/packaging/wix/requirements.txt contrib/packaging/wix/requirements.txt.in
-#
-docutils==0.15.2 \
-    --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
-    --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
-    --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99
-pygments==2.4.2 \
-    --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
-    --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile --generate-hashes --output-file=contrib/packaging/wix/requirements.txt contrib/packaging/wix/requirements.txt.in
+#
+docutils==0.15.2 \
+    --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
+    --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
+    --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99
+pygments==2.4.2 \
+    --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
+    --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297
--- a/contrib/packaging/wix/requirements.txt.in	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/packaging/wix/requirements.txt.in	Tue Jan 21 13:14:51 2020 -0500
@@ -1,2 +1,2 @@
-docutils
-pygments
+docutils
+pygments
--- a/contrib/packaging/wix/templates.wxs	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,251 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
-
-  <?include guids.wxi ?>
-  <?include defines.wxi ?>
-
-  <Fragment>
-    <ComponentGroup Id="templatesFolder">
-
-      <ComponentRef Id="templates.root" />
-
-      <ComponentRef Id="templates.atom" />
-      <ComponentRef Id="templates.coal" />
-      <ComponentRef Id="templates.gitweb" />
-      <ComponentRef Id="templates.json" />
-      <ComponentRef Id="templates.monoblue" />
-      <ComponentRef Id="templates.paper" />
-      <ComponentRef Id="templates.raw" />
-      <ComponentRef Id="templates.rss" />
-      <ComponentRef Id="templates.spartan" />
-      <ComponentRef Id="templates.static" />
-
-    </ComponentGroup>
-  </Fragment>
-
-  <Fragment>
-    <DirectoryRef Id="INSTALLDIR">
-
-      <Directory Id="templatesdir" Name="templates" FileSource="$(var.SourceDir)">
-
-        <Component Id="templates.root" Guid="$(var.templates.root.guid)" Win64='$(var.IsX64)'>
-          <File Name="map-cmdline.changelog" KeyPath="yes" />
-          <File Name="map-cmdline.compact" />
-          <File Name="map-cmdline.default" />
-          <File Name="map-cmdline.show" />
-          <File Name="map-cmdline.bisect" />
-          <File Name="map-cmdline.xml" />
-          <File Name="map-cmdline.status" />
-          <File Name="map-cmdline.phases" />
-        </Component>
-
-        <Directory Id="templates.jsondir" Name="json">
-          <Component Id="templates.json" Guid="$(var.templates.json.guid)" Win64='$(var.IsX64)'>
-            <File Id="json.changelist.tmpl" Name="changelist.tmpl" KeyPath="yes" />
-            <File Id="json.graph.tmpl"      Name="graph.tmpl" />
-            <File Id="json.map"             Name="map" />
-          </Component>
-        </Directory>
-
-        <Directory Id="templates.atomdir" Name="atom">
-          <Component Id="templates.atom" Guid="$(var.templates.atom.guid)" Win64='$(var.IsX64)'>
-            <File Id="atom.changelog.tmpl"      Name="changelog.tmpl" KeyPath="yes" />
-            <File Id="atom.changelogentry.tmpl" Name="changelogentry.tmpl" />
-            <File Id="atom.error.tmpl"          Name="error.tmpl" />
-            <File Id="atom.filelog.tmpl"        Name="filelog.tmpl" />
-            <File Id="atom.header.tmpl"         Name="header.tmpl" />
-            <File Id="atom.map"                 Name="map" />
-            <File Id="atom.tagentry.tmpl"       Name="tagentry.tmpl" />
-            <File Id="atom.tags.tmpl"           Name="tags.tmpl" />
-            <File Id="atom.branchentry.tmpl"    Name="branchentry.tmpl" />
-            <File Id="atom.branches.tmpl"       Name="branches.tmpl" />
-            <File Id="atom.bookmarks.tmpl"      Name="bookmarks.tmpl" />
-            <File Id="atom.bookmarkentry.tmpl"  Name="bookmarkentry.tmpl" />
-          </Component>
-        </Directory>
-
-        <Directory Id="templates.coaldir" Name="coal">
-          <Component Id="templates.coal" Guid="$(var.templates.coal.guid)" Win64='$(var.IsX64)'>
-            <File Id="coal.header.tmpl" Name="header.tmpl" KeyPath="yes" />
-            <File Id="coal.map"         Name="map" />
-          </Component>
-        </Directory>
-
-        <Directory Id="templates.gitwebdir" Name="gitweb">
-          <Component Id="templates.gitweb" Guid="$(var.templates.gitweb.guid)" Win64='$(var.IsX64)'>
-            <File Id="gitweb.branches.tmpl"       Name="branches.tmpl" KeyPath="yes" />
-            <File Id="gitweb.bookmarks.tmpl"      Name="bookmarks.tmpl" />
-            <File Id="gitweb.changelog.tmpl"      Name="changelog.tmpl" />
-            <File Id="gitweb.changelogentry.tmpl" Name="changelogentry.tmpl" />
-            <File Id="gitweb.changeset.tmpl"      Name="changeset.tmpl" />
-            <File Id="gitweb.error.tmpl"          Name="error.tmpl" />
-            <File Id="gitweb.fileannotate.tmpl"   Name="fileannotate.tmpl" />
-            <File Id="gitweb.filecomparison.tmpl" Name="filecomparison.tmpl" />
-            <File Id="gitweb.filediff.tmpl"       Name="filediff.tmpl" />
-            <File Id="gitweb.filelog.tmpl"        Name="filelog.tmpl" />
-            <File Id="gitweb.filerevision.tmpl"   Name="filerevision.tmpl" />
-            <File Id="gitweb.footer.tmpl"         Name="footer.tmpl" />
-            <File Id="gitweb.graph.tmpl"          Name="graph.tmpl" />
-            <File Id="gitweb.graphentry.tmpl"     Name="graphentry.tmpl" />
-            <File Id="gitweb.header.tmpl"         Name="header.tmpl" />
-            <File Id="gitweb.index.tmpl"          Name="index.tmpl" />
-            <File Id="gitweb.manifest.tmpl"       Name="manifest.tmpl" />
-            <File Id="gitweb.map"                 Name="map" />
-            <File Id="gitweb.notfound.tmpl"       Name="notfound.tmpl" />
-            <File Id="gitweb.search.tmpl"         Name="search.tmpl" />
-            <File Id="gitweb.shortlog.tmpl"       Name="shortlog.tmpl" />
-            <File Id="gitweb.summary.tmpl"        Name="summary.tmpl" />
-            <File Id="gitweb.tags.tmpl"           Name="tags.tmpl" />
-            <File Id="gitweb.help.tmpl"           Name="help.tmpl" />
-            <File Id="gitweb.helptopics.tmpl"     Name="helptopics.tmpl" />
-          </Component>
-        </Directory>
-
-        <Directory Id="templates.monobluedir" Name="monoblue">
-          <Component Id="templates.monoblue" Guid="$(var.templates.monoblue.guid)" Win64='$(var.IsX64)'>
-            <File Id="monoblue.branches.tmpl"       Name="branches.tmpl" KeyPath="yes" />
-            <File Id="monoblue.bookmarks.tmpl"      Name="bookmarks.tmpl" />
-            <File Id="monoblue.changelog.tmpl"      Name="changelog.tmpl" />
-            <File Id="monoblue.changelogentry.tmpl" Name="changelogentry.tmpl" />
-            <File Id="monoblue.changeset.tmpl"      Name="changeset.tmpl" />
-            <File Id="monoblue.error.tmpl"          Name="error.tmpl" />
-            <File Id="monoblue.fileannotate.tmpl"   Name="fileannotate.tmpl" />
-            <File Id="monoblue.filecomparison.tmpl" Name="filecomparison.tmpl" />
-            <File Id="monoblue.filediff.tmpl"       Name="filediff.tmpl" />
-            <File Id="monoblue.filelog.tmpl"        Name="filelog.tmpl" />
-            <File Id="monoblue.filerevision.tmpl"   Name="filerevision.tmpl" />
-            <File Id="monoblue.footer.tmpl"         Name="footer.tmpl" />
-            <File Id="monoblue.graph.tmpl"          Name="graph.tmpl" />
-            <File Id="monoblue.graphentry.tmpl"     Name="graphentry.tmpl" />
-            <File Id="monoblue.header.tmpl"         Name="header.tmpl" />
-            <File Id="monoblue.index.tmpl"          Name="index.tmpl" />
-            <File Id="monoblue.manifest.tmpl"       Name="manifest.tmpl" />
-            <File Id="monoblue.map"                 Name="map" />
-            <File Id="monoblue.notfound.tmpl"       Name="notfound.tmpl" />
-            <File Id="monoblue.search.tmpl"         Name="search.tmpl" />
-            <File Id="monoblue.shortlog.tmpl"       Name="shortlog.tmpl" />
-            <File Id="monoblue.summary.tmpl"        Name="summary.tmpl" />
-            <File Id="monoblue.tags.tmpl"           Name="tags.tmpl" />
-            <File Id="monoblue.help.tmpl"           Name="help.tmpl" />
-            <File Id="monoblue.helptopics.tmpl"     Name="helptopics.tmpl" />
-          </Component>
-        </Directory>
-
-        <Directory Id="templates.paperdir" Name="paper">
-          <Component Id="templates.paper" Guid="$(var.templates.paper.guid)" Win64='$(var.IsX64)'>
-            <File Id="paper.branches.tmpl"      Name="branches.tmpl" KeyPath="yes" />
-            <File Id="paper.bookmarks.tmpl"     Name="bookmarks.tmpl" />
-            <File Id="paper.changeset.tmpl"     Name="changeset.tmpl" />
-            <File Id="paper.diffstat.tmpl"      Name="diffstat.tmpl" />
-            <File Id="paper.error.tmpl"         Name="error.tmpl" />
-            <File Id="paper.fileannotate.tmpl"  Name="fileannotate.tmpl" />
-            <File Id="paper.filecomparison.tmpl" Name="filecomparison.tmpl" />
-            <File Id="paper.filediff.tmpl"      Name="filediff.tmpl" />
-            <File Id="paper.filelog.tmpl"       Name="filelog.tmpl" />
-            <File Id="paper.filelogentry.tmpl"  Name="filelogentry.tmpl" />
-            <File Id="paper.filerevision.tmpl"  Name="filerevision.tmpl" />
-            <File Id="paper.footer.tmpl"        Name="footer.tmpl" />
-            <File Id="paper.graph.tmpl"         Name="graph.tmpl" />
-            <File Id="paper.graphentry.tmpl"    Name="graphentry.tmpl" />
-            <File Id="paper.header.tmpl"        Name="header.tmpl" />
-            <File Id="paper.index.tmpl"         Name="index.tmpl" />
-            <File Id="paper.manifest.tmpl"      Name="manifest.tmpl" />
-            <File Id="paper.map"                Name="map" />
-            <File Id="paper.notfound.tmpl"      Name="notfound.tmpl" />
-            <File Id="paper.search.tmpl"        Name="search.tmpl" />
-            <File Id="paper.shortlog.tmpl"      Name="shortlog.tmpl" />
-            <File Id="paper.shortlogentry.tmpl" Name="shortlogentry.tmpl" />
-            <File Id="paper.tags.tmpl"          Name="tags.tmpl" />
-            <File Id="paper.help.tmpl"          Name="help.tmpl" />
-            <File Id="paper.helptopics.tmpl"    Name="helptopics.tmpl" />
-          </Component>
-        </Directory>
-
-        <Directory Id="templates.rawdir" Name="raw">
-          <Component Id="templates.raw" Guid="$(var.templates.raw.guid)" Win64='$(var.IsX64)'>
-            <File Id="raw.changeset.tmpl"    Name="changeset.tmpl" KeyPath="yes" />
-            <File Id="raw.error.tmpl"        Name="error.tmpl" />
-            <File Id="raw.fileannotate.tmpl" Name="fileannotate.tmpl" />
-            <File Id="raw.filediff.tmpl"     Name="filediff.tmpl" />
-            <File Id="raw.graph.tmpl"        Name="graph.tmpl" />
-            <File Id="raw.graphedge.tmpl"    Name="graphedge.tmpl" />
-            <File Id="raw.graphnode.tmpl"    Name="graphnode.tmpl" />
-            <File Id="raw.index.tmpl"        Name="index.tmpl" />
-            <File Id="raw.manifest.tmpl"     Name="manifest.tmpl" />
-            <File Id="raw.map"               Name="map" />
-            <File Id="raw.notfound.tmpl"     Name="notfound.tmpl" />
-            <File Id="raw.search.tmpl"       Name="search.tmpl" />
-            <File Id="raw.logentry.tmpl"     Name="logentry.tmpl" />
-            <File Id="raw.changelog.tmpl"    Name="changelog.tmpl" />
-          </Component>
-        </Directory>
-
-        <Directory Id="templates.rssdir" Name="rss">
-          <Component Id="templates.rss" Guid="$(var.templates.rss.guid)" Win64='$(var.IsX64)'>
-            <File Id="rss.changelog.tmpl"      Name="changelog.tmpl" KeyPath="yes" />
-            <File Id="rss.changelogentry.tmpl" Name="changelogentry.tmpl" />
-            <File Id="rss.error.tmpl"          Name="error.tmpl" />
-            <File Id="rss.filelog.tmpl"        Name="filelog.tmpl" />
-            <File Id="rss.filelogentry.tmpl"   Name="filelogentry.tmpl" />
-            <File Id="rss.header.tmpl"         Name="header.tmpl" />
-            <File Id="rss.map"                 Name="map" />
-            <File Id="rss.tagentry.tmpl"       Name="tagentry.tmpl" />
-            <File Id="rss.tags.tmpl"           Name="tags.tmpl" />
-            <File Id="rss.bookmarks.tmpl"      Name="bookmarks.tmpl" />
-            <File Id="rss.bookmarkentry.tmpl"  Name="bookmarkentry.tmpl" />
-            <File Id="rss.branchentry.tmpl"    Name="branchentry.tmpl" />
-            <File Id="rss.branches.tmpl"       Name="branches.tmpl" />
-          </Component>
-        </Directory>
-
-        <Directory Id="templates.spartandir" Name="spartan">
-          <Component Id="templates.spartan" Guid="$(var.templates.spartan.guid)" Win64='$(var.IsX64)'>
-            <File Id="spartan.branches.tmpl"       Name="branches.tmpl" KeyPath="yes" />
-            <File Id="spartan.changelog.tmpl"      Name="changelog.tmpl" />
-            <File Id="spartan.changelogentry.tmpl" Name="changelogentry.tmpl" />
-            <File Id="spartan.changeset.tmpl"      Name="changeset.tmpl" />
-            <File Id="spartan.error.tmpl"          Name="error.tmpl" />
-            <File Id="spartan.fileannotate.tmpl"   Name="fileannotate.tmpl" />
-            <File Id="spartan.filediff.tmpl"       Name="filediff.tmpl" />
-            <File Id="spartan.filelog.tmpl"        Name="filelog.tmpl" />
-            <File Id="spartan.filelogentry.tmpl"   Name="filelogentry.tmpl" />
-            <File Id="spartan.filerevision.tmpl"   Name="filerevision.tmpl" />
-            <File Id="spartan.footer.tmpl"         Name="footer.tmpl" />
-            <File Id="spartan.graph.tmpl"          Name="graph.tmpl" />
-            <File Id="spartan.graphentry.tmpl"     Name="graphentry.tmpl" />
-            <File Id="spartan.header.tmpl"         Name="header.tmpl" />
-            <File Id="spartan.index.tmpl"          Name="index.tmpl" />
-            <File Id="spartan.manifest.tmpl"       Name="manifest.tmpl" />
-            <File Id="spartan.map"                 Name="map" />
-            <File Id="spartan.notfound.tmpl"       Name="notfound.tmpl" />
-            <File Id="spartan.search.tmpl"         Name="search.tmpl" />
-            <File Id="spartan.shortlog.tmpl"       Name="shortlog.tmpl" />
-            <File Id="spartan.shortlogentry.tmpl"  Name="shortlogentry.tmpl" />
-            <File Id="spartan.tags.tmpl"           Name="tags.tmpl" />
-          </Component>
-        </Directory>
-
-        <Directory Id="templates.staticdir" Name="static">
-          <Component Id="templates.static" Guid="$(var.templates.static.guid)" Win64='$(var.IsX64)'>
-            <File Id="static.background.png"     Name="background.png" KeyPath="yes" />
-            <File Id="static.coal.file.png"      Name="coal-file.png" />
-            <File Id="static.coal.folder.png"    Name="coal-folder.png" />
-            <File Id="static.followlines.js"     Name="followlines.js" />
-            <File Id="static.mercurial.js"       Name="mercurial.js" />
-            <File Id="static.hgicon.png"         Name="hgicon.png" />
-            <File Id="static.hglogo.png"         Name="hglogo.png" />
-            <File Id="static.style.coal.css"     Name="style-extra-coal.css" />
-            <File Id="static.style.gitweb.css"   Name="style-gitweb.css" />
-            <File Id="static.style.monoblue.css" Name="style-monoblue.css" />
-            <File Id="static.style.paper.css"    Name="style-paper.css" />
-            <File Id="static.style.css"          Name="style.css" />
-            <File Id="static.feed.icon"          Name="feed-icon-14x14.png" />
-          </Component>
-        </Directory>
-
-      </Directory>
-
-    </DirectoryRef>
-  </Fragment>
-
- </Wix>
--- a/contrib/perf.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/perf.py	Tue Jan 21 13:14:51 2020 -0500
@@ -726,8 +726,8 @@
 
 def clearchangelog(repo):
     if repo is not repo.unfiltered():
-        object.__setattr__(repo, r'_clcachekey', None)
-        object.__setattr__(repo, r'_clcache', None)
+        object.__setattr__(repo, '_clcachekey', None)
+        object.__setattr__(repo, '_clcache', None)
     clearfilecache(repo.unfiltered(), 'changelog')
 
 
@@ -760,7 +760,10 @@
 
 @command(
     b'perfstatus',
-    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
+    [
+        (b'u', b'unknown', False, b'ask status to look for unknown files'),
+        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
+    ]
     + formatteropts,
 )
 def perfstatus(ui, repo, **opts):
@@ -776,7 +779,20 @@
     # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
     #                                                False))))
     timer, fm = gettimer(ui, opts)
-    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
+    if opts[b'dirstate']:
+        dirstate = repo.dirstate
+        m = scmutil.matchall(repo)
+        unknown = opts[b'unknown']
+
+        def status_dirstate():
+            s = dirstate.status(
+                m, subrepos=[], ignored=False, clean=False, unknown=unknown
+            )
+            sum(map(bool, s))
+
+        timer(status_dirstate)
+    else:
+        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
     fm.end()
 
 
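The new --dirstate flag times dirstate.status() in isolation instead of the full repo.status() call. A minimal sketch of the benchmarked body, using only names from the hunk above (timer comes from gettimer(ui, opts); unknown is hardwired to False here for brevity):

    m = scmutil.matchall(repo)

    def status_dirstate():
        s = repo.dirstate.status(
            m, subrepos=[], ignored=False, clean=False, unknown=False
        )
        # presumably consumes every field of the result so any lazily
        # computed status lists are materialized inside the timed region
        sum(map(bool, s))

    timer(status_dirstate)

Run as `hg perfstatus --dirstate`; without the flag the command still benchmarks repo.status() as before.
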
@@ -804,6 +820,7 @@
     if util.safehasattr(cl, b'clearcaches'):
         cl.clearcaches()
     elif util.safehasattr(cl, b'_nodecache'):
+        # <= hg-5.2
         from mercurial.node import nullid, nullrev
 
         cl._nodecache = {nullid: nullrev}
@@ -1404,13 +1421,15 @@
     else:
         ui.statusnoi18n(b'publishing: no\n')
 
-    nodemap = repo.changelog.nodemap
+    has_node = getattr(repo.changelog.index, 'has_node', None)
+    if has_node is None:
+        has_node = repo.changelog.nodemap.__contains__
     nonpublishroots = 0
     for nhex, phase in remotephases.iteritems():
         if nhex == b'publishing':  # ignore data related to publish option
             continue
         node = bin(nhex)
-        if node in nodemap and int(phase):
+        if has_node(node) and int(phase):
             nonpublishroots += 1
     ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
     ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
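This is the first of several perf.py hunks that feature-detect the new changelog index API rather than assume the dict-like nodemap. A minimal sketch of the probe, restricted to names the hunk itself uses:

    # prefer the new index method; fall back to membership on the old
    # nodemap, which behaves like a dict keyed by binary node
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    # after this, the old `node in nodemap` test is spelled has_node(node)
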
@@ -1610,7 +1629,11 @@
     def setnodeget():
         # probably not necessary, but for good measure
         clearchangelog(unfi)
-        nodeget[0] = makecl(unfi).nodemap.get
+        cl = makecl(unfi)
+        if util.safehasattr(cl.index, 'get_rev'):
+            nodeget[0] = cl.index.get_rev
+        else:
+            nodeget[0] = cl.nodemap.get
 
     def d():
         get = nodeget[0]
@@ -1636,13 +1659,13 @@
     timer, fm = gettimer(ui, opts)
 
     def d():
-        if os.name != r'nt':
+        if os.name != 'nt':
             os.system(
                 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
             )
         else:
-            os.environ[r'HGRCPATH'] = r' '
-            os.system(r"%s version -q > NUL" % sys.argv[0])
+            os.environ['HGRCPATH'] = r' '
+            os.system("%s version -q > NUL" % sys.argv[0])
 
     timer(d)
     fm.end()
@@ -1828,7 +1851,7 @@
     opts = _byteskwargs(opts)
 
     nullui = ui.copy()
-    nullui.fout = open(os.devnull, r'wb')
+    nullui.fout = open(os.devnull, 'wb')
     nullui.disablepager()
     revs = opts.get(b'rev')
     if not revs:
@@ -1855,7 +1878,6 @@
 
 
 def _displaystats(ui, opts, entries, data):
-    pass
     # use a second formatter because the data are quite different, not sure
     # how it flies with the templater.
     fm = ui.formatter(b'perf-stats', opts)
@@ -2025,8 +2047,8 @@
                 data['p1.time'] = end - begin
                 begin = util.timer()
                 p2renames = copies.pathcopies(b, p2)
+                end = util.timer()
                 data['p2.time'] = end - begin
-                end = util.timer()
                 data['p1.renamedfiles'] = len(p1renames)
                 data['p2.renamedfiles'] = len(p2renames)
 
@@ -2198,9 +2220,6 @@
 
     fm.end()
     if dostats:
-        # use a second formatter because the data are quite different, not sure
-        # how it flies with the templater.
-        fm = ui.formatter(b'perf', opts)
         entries = [
             ('nbrevs', 'number of revision covered'),
             ('nbmissingfiles', 'number of missing files at head'),
@@ -2576,25 +2595,38 @@
                 index[rev]
 
     def resolvenode(node):
-        nodemap = revlogio.parseindex(data, inline)[1]
-        # This only works for the C code.
-        if nodemap is None:
-            return
+        index = revlogio.parseindex(data, inline)[0]
+        rev = getattr(index, 'rev', None)
+        if rev is None:
+            nodemap = getattr(
+                revlogio.parseindex(data, inline)[0], 'nodemap', None
+            )
+            # This only works for the C code.
+            if nodemap is None:
+                return
+            rev = nodemap.__getitem__
 
         try:
-            nodemap[node]
+            rev(node)
         except error.RevlogError:
             pass
 
     def resolvenodes(nodes, count=1):
-        nodemap = revlogio.parseindex(data, inline)[1]
-        if nodemap is None:
-            return
+        index = revlogio.parseindex(data, inline)[0]
+        rev = getattr(index, 'rev', None)
+        if rev is None:
+            nodemap = getattr(
+                revlogio.parseindex(data, inline)[0], 'nodemap', None
+            )
+            # This only works for the C code.
+            if nodemap is None:
+                return
+            rev = nodemap.__getitem__
 
         for i in range(count):
             for node in nodes:
                 try:
-                    nodemap[node]
+                    rev(node)
                 except error.RevlogError:
                     pass
 
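The remaining perf.py hunks apply the same recipe to the lookup operations: index.get_rev() supersedes nodemap.get, and index.rev() supersedes nodemap[node]. A consolidated sketch of both fallbacks, again limited to names the hunks use:

    cl = makecl(unfi)
    get_rev = getattr(cl.index, 'get_rev', None)
    if get_rev is None:
        get_rev = cl.nodemap.get           # returns None for unknown nodes

    index = revlogio.parseindex(data, inline)[0]
    rev = getattr(index, 'rev', None)
    if rev is None:
        nodemap = getattr(index, 'nodemap', None)
        if nodemap is not None:            # only the C index has a nodemap
            rev = nodemap.__getitem__      # raises error.RevlogError on a miss
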
--- a/contrib/python-zstandard/NEWS.rst	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/NEWS.rst	Tue Jan 21 13:14:51 2020 -0500
@@ -43,13 +43,18 @@
 * Support modifying compression parameters mid operation when supported by
   zstd API.
 * Expose ``ZSTD_CLEVEL_DEFAULT`` constant.
+* Expose ``ZSTD_SRCSIZEHINT_{MIN,MAX}`` constants.
 * Support ``ZSTD_p_forceAttachDict`` compression parameter.
-* Support ``ZSTD_c_literalCompressionMode `` compression parameter.
+* Support ``ZSTD_dictForceLoad`` dictionary compression parameter.
+* Support ``ZSTD_c_targetCBlockSize`` compression parameter.
+* Support ``ZSTD_c_literalCompressionMode`` compression parameter.
+* Support ``ZSTD_c_srcSizeHint`` compression parameter.
 * Use ``ZSTD_CCtx_getParameter()``/``ZSTD_CCtxParam_getParameter()`` for retrieving
   compression parameters.
 * Consider exposing ``ZSTDMT_toFlushNow()``.
 * Expose ``ZDICT_trainFromBuffer_fastCover()``,
   ``ZDICT_optimizeTrainFromBuffer_fastCover``.
+* Expose ``ZSTD_Sequence`` struct and related ``ZSTD_getSequences()`` API.
 * Expose and enforce ``ZSTD_minCLevel()`` for minimum compression level.
 * Consider a ``chunker()`` API for decompression.
 * Consider stats for ``chunker()`` API, including finding the last consumed
@@ -67,6 +72,20 @@
 * API for ensuring max memory ceiling isn't exceeded.
 * Move off nose for testing.
 
+0.13.0 (released 2019-12-28)
+============================
+
+Changes
+-------
+
+* ``pytest-xdist`` ``pytest`` extension is now installed so tests can be
+  run in parallel.
+* CI now builds ``manylinux2010`` and ``manylinux2014`` binary wheels
+  instead of a mix of ``manylinux2010`` and ``manylinux1``.
+* Official support for Python 3.8 has been added.
+* Bundled zstandard library upgraded from 1.4.3 to 1.4.4.
+* Python code has been reformatted with black.
+
 0.12.0 (released 2019-09-15)
 ============================
 
--- a/contrib/python-zstandard/README.rst	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/README.rst	Tue Jan 21 13:14:51 2020 -0500
@@ -20,7 +20,7 @@
 Requirements
 ============
 
-This extension is designed to run with Python 2.7, 3.4, 3.5, 3.6, and 3.7
+This extension is designed to run with Python 2.7, 3.5, 3.6, 3.7, and 3.8
 on common platforms (Linux, Windows, and OS X). On PyPy (both PyPy2 and PyPy3) we support version 6.0.0 and above. 
 x86 and x86_64 are well-tested on Windows. Only x86_64 is well-tested on Linux and macOS.
 
--- a/contrib/python-zstandard/c-ext/python-zstandard.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h	Tue Jan 21 13:14:51 2020 -0500
@@ -16,7 +16,7 @@
 #include <zdict.h>
 
 /* Remember to change the string in zstandard/__init__ as well */
-#define PYTHON_ZSTANDARD_VERSION "0.12.0"
+#define PYTHON_ZSTANDARD_VERSION "0.13.0"
 
 typedef enum {
 	compressorobj_flush_finish,
--- a/contrib/python-zstandard/make_cffi.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/make_cffi.py	Tue Jan 21 13:14:51 2020 -0500
@@ -16,80 +16,82 @@
 
 HERE = os.path.abspath(os.path.dirname(__file__))
 
-SOURCES = ['zstd/%s' % p for p in (
-    'common/debug.c',
-    'common/entropy_common.c',
-    'common/error_private.c',
-    'common/fse_decompress.c',
-    'common/pool.c',
-    'common/threading.c',
-    'common/xxhash.c',
-    'common/zstd_common.c',
-    'compress/fse_compress.c',
-    'compress/hist.c',
-    'compress/huf_compress.c',
-    'compress/zstd_compress.c',
-    'compress/zstd_compress_literals.c',
-    'compress/zstd_compress_sequences.c',
-    'compress/zstd_double_fast.c',
-    'compress/zstd_fast.c',
-    'compress/zstd_lazy.c',
-    'compress/zstd_ldm.c',
-    'compress/zstd_opt.c',
-    'compress/zstdmt_compress.c',
-    'decompress/huf_decompress.c',
-    'decompress/zstd_ddict.c',
-    'decompress/zstd_decompress.c',
-    'decompress/zstd_decompress_block.c',
-    'dictBuilder/cover.c',
-    'dictBuilder/fastcover.c',
-    'dictBuilder/divsufsort.c',
-    'dictBuilder/zdict.c',
-)]
+SOURCES = [
+    "zstd/%s" % p
+    for p in (
+        "common/debug.c",
+        "common/entropy_common.c",
+        "common/error_private.c",
+        "common/fse_decompress.c",
+        "common/pool.c",
+        "common/threading.c",
+        "common/xxhash.c",
+        "common/zstd_common.c",
+        "compress/fse_compress.c",
+        "compress/hist.c",
+        "compress/huf_compress.c",
+        "compress/zstd_compress.c",
+        "compress/zstd_compress_literals.c",
+        "compress/zstd_compress_sequences.c",
+        "compress/zstd_double_fast.c",
+        "compress/zstd_fast.c",
+        "compress/zstd_lazy.c",
+        "compress/zstd_ldm.c",
+        "compress/zstd_opt.c",
+        "compress/zstdmt_compress.c",
+        "decompress/huf_decompress.c",
+        "decompress/zstd_ddict.c",
+        "decompress/zstd_decompress.c",
+        "decompress/zstd_decompress_block.c",
+        "dictBuilder/cover.c",
+        "dictBuilder/fastcover.c",
+        "dictBuilder/divsufsort.c",
+        "dictBuilder/zdict.c",
+    )
+]
 
 # Headers whose preprocessed output will be fed into cdef().
-HEADERS = [os.path.join(HERE, 'zstd', *p) for p in (
-    ('zstd.h',),
-    ('dictBuilder', 'zdict.h'),
-)]
+HEADERS = [
+    os.path.join(HERE, "zstd", *p) for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
+]
 
-INCLUDE_DIRS = [os.path.join(HERE, d) for d in (
-    'zstd',
-    'zstd/common',
-    'zstd/compress',
-    'zstd/decompress',
-    'zstd/dictBuilder',
-)]
+INCLUDE_DIRS = [
+    os.path.join(HERE, d)
+    for d in (
+        "zstd",
+        "zstd/common",
+        "zstd/compress",
+        "zstd/decompress",
+        "zstd/dictBuilder",
+    )
+]
 
 # cffi can't parse some of the primitives in zstd.h. So we invoke the
 # preprocessor and feed its output into cffi.
 compiler = distutils.ccompiler.new_compiler()
 
 # Needed for MSVC.
-if hasattr(compiler, 'initialize'):
+if hasattr(compiler, "initialize"):
     compiler.initialize()
 
 # Distutils doesn't set compiler.preprocessor, so invoke the preprocessor
 # manually.
-if compiler.compiler_type == 'unix':
-    args = list(compiler.executables['compiler'])
-    args.extend([
-        '-E',
-        '-DZSTD_STATIC_LINKING_ONLY',
-        '-DZDICT_STATIC_LINKING_ONLY',
-    ])
-elif compiler.compiler_type == 'msvc':
+if compiler.compiler_type == "unix":
+    args = list(compiler.executables["compiler"])
+    args.extend(
+        ["-E", "-DZSTD_STATIC_LINKING_ONLY", "-DZDICT_STATIC_LINKING_ONLY",]
+    )
+elif compiler.compiler_type == "msvc":
     args = [compiler.cc]
-    args.extend([
-        '/EP',
-        '/DZSTD_STATIC_LINKING_ONLY',
-        '/DZDICT_STATIC_LINKING_ONLY',
-    ])
+    args.extend(
+        ["/EP", "/DZSTD_STATIC_LINKING_ONLY", "/DZDICT_STATIC_LINKING_ONLY",]
+    )
 else:
-    raise Exception('unsupported compiler type: %s' % compiler.compiler_type)
+    raise Exception("unsupported compiler type: %s" % compiler.compiler_type)
+
 
 def preprocess(path):
-    with open(path, 'rb') as fh:
+    with open(path, "rb") as fh:
         lines = []
         it = iter(fh)
 
@@ -104,32 +106,44 @@
             # We define ZSTD_STATIC_LINKING_ONLY, which is redundant with the inline
             # #define in zstdmt_compress.h and results in a compiler warning. So drop
             # the inline #define.
-            if l.startswith((b'#include <stddef.h>',
-                             b'#include "zstd.h"',
-                             b'#define ZSTD_STATIC_LINKING_ONLY')):
+            if l.startswith(
+                (
+                    b"#include <stddef.h>",
+                    b'#include "zstd.h"',
+                    b"#define ZSTD_STATIC_LINKING_ONLY",
+                )
+            ):
                 continue
 
+            # The preprocessor environment on Windows doesn't define include
+            # paths, so the #include of limits.h fails. We work around this
+            # by removing that import and defining INT_MAX ourselves. This is
+            # a bit hacky. But it gets the job done.
+            # TODO make limits.h work on Windows so we ensure INT_MAX is
+            # correct.
+            if l.startswith(b"#include <limits.h>"):
+                l = b"#define INT_MAX 2147483647\n"
+
             # ZSTDLIB_API may not be defined if we dropped zstd.h. It isn't
             # important so just filter it out.
-            if l.startswith(b'ZSTDLIB_API'):
-                l = l[len(b'ZSTDLIB_API '):]
+            if l.startswith(b"ZSTDLIB_API"):
+                l = l[len(b"ZSTDLIB_API ") :]
 
             lines.append(l)
 
-    fd, input_file = tempfile.mkstemp(suffix='.h')
-    os.write(fd, b''.join(lines))
+    fd, input_file = tempfile.mkstemp(suffix=".h")
+    os.write(fd, b"".join(lines))
     os.close(fd)
 
     try:
         env = dict(os.environ)
-        if getattr(compiler, '_paths', None):
-            env['PATH'] = compiler._paths
-        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE,
-                                   env=env)
+        if getattr(compiler, "_paths", None):
+            env["PATH"] = compiler._paths
+        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE, env=env)
         output = process.communicate()[0]
         ret = process.poll()
         if ret:
-            raise Exception('preprocessor exited with error')
+            raise Exception("preprocessor exited with error")
 
         return output
     finally:
@@ -141,16 +155,16 @@
     for line in output.splitlines():
         # CFFI's parser doesn't like __attribute__ on UNIX compilers.
         if line.startswith(b'__attribute__ ((visibility ("default"))) '):
-            line = line[len(b'__attribute__ ((visibility ("default"))) '):]
+            line = line[len(b'__attribute__ ((visibility ("default"))) ') :]
 
-        if line.startswith(b'__attribute__((deprecated('):
+        if line.startswith(b"__attribute__((deprecated("):
             continue
-        elif b'__declspec(deprecated(' in line:
+        elif b"__declspec(deprecated(" in line:
             continue
 
         lines.append(line)
 
-    return b'\n'.join(lines)
+    return b"\n".join(lines)
 
 
 ffi = cffi.FFI()
@@ -159,18 +173,22 @@
 # *_DISABLE_DEPRECATE_WARNINGS prevents the compiler from emitting a warning
 # when cffi uses the function. Since we statically link against zstd, even
 # if we use the deprecated functions it shouldn't be a huge problem.
-ffi.set_source('_zstd_cffi', '''
+ffi.set_source(
+    "_zstd_cffi",
+    """
 #define MIN(a,b) ((a)<(b) ? (a) : (b))
 #define ZSTD_STATIC_LINKING_ONLY
 #include <zstd.h>
 #define ZDICT_STATIC_LINKING_ONLY
 #define ZDICT_DISABLE_DEPRECATE_WARNINGS
 #include <zdict.h>
-''', sources=SOURCES,
-     include_dirs=INCLUDE_DIRS,
-     extra_compile_args=['-DZSTD_MULTITHREAD'])
+""",
+    sources=SOURCES,
+    include_dirs=INCLUDE_DIRS,
+    extra_compile_args=["-DZSTD_MULTITHREAD"],
+)
 
-DEFINE = re.compile(b'^\\#define ([a-zA-Z0-9_]+) ')
+DEFINE = re.compile(b"^\\#define ([a-zA-Z0-9_]+) ")
 
 sources = []
 
@@ -181,27 +199,27 @@
 
     # #define's are effectively erased as part of going through preprocessor.
     # So perform a manual pass to re-add those to the cdef source.
-    with open(header, 'rb') as fh:
+    with open(header, "rb") as fh:
         for line in fh:
             line = line.strip()
             m = DEFINE.match(line)
             if not m:
                 continue
 
-            if m.group(1) == b'ZSTD_STATIC_LINKING_ONLY':
+            if m.group(1) == b"ZSTD_STATIC_LINKING_ONLY":
                 continue
 
             # The parser doesn't like some constants with complex values.
-            if m.group(1) in (b'ZSTD_LIB_VERSION', b'ZSTD_VERSION_STRING'):
+            if m.group(1) in (b"ZSTD_LIB_VERSION", b"ZSTD_VERSION_STRING"):
                 continue
 
             # The ... is magic syntax by the cdef parser to resolve the
             # value at compile time.
-            sources.append(m.group(0) + b' ...')
+            sources.append(m.group(0) + b" ...")
 
-cdeflines = b'\n'.join(sources).splitlines()
+cdeflines = b"\n".join(sources).splitlines()
 cdeflines = [l for l in cdeflines if l.strip()]
-ffi.cdef(b'\n'.join(cdeflines).decode('latin1'))
+ffi.cdef(b"\n".join(cdeflines).decode("latin1"))
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     ffi.compile()
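Aside from the black reformatting, the substantive make_cffi.py change is the Windows workaround: the MSVC preprocessor environment lacks the include path for limits.h, so the script swaps that include for a literal #define INT_MAX before preprocessing. A condensed sketch of the overall cdef pipeline, assuming the enclosing `for header in HEADERS:` loop implied by the `header` variable above:

    output = preprocess(header)   # runs `cc -E` on unix or `cl /EP` with MSVC
    sources.append(output)        # after the __attribute__ stripping above
    # the DEFINE regex pass then re-adds `#define NAME ...` lines that the
    # preprocessor erased, since cdef() still needs their values
    cdeflines = [l for l in b"\n".join(sources).splitlines() if l.strip()]
    ffi.cdef(b"\n".join(cdeflines).decode("latin1"))
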
--- a/contrib/python-zstandard/setup.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/setup.py	Tue Jan 21 13:14:51 2020 -0500
@@ -16,7 +16,7 @@
 # (like memoryview).
 # Need feature in 1.11 for ffi.gc() to declare size of objects so we avoid
 # garbage collection pitfalls.
-MINIMUM_CFFI_VERSION = '1.11'
+MINIMUM_CFFI_VERSION = "1.11"
 
 try:
     import cffi
@@ -26,9 +26,11 @@
     # out the CFFI version here and reject CFFI if it is too old.
     cffi_version = LooseVersion(cffi.__version__)
     if cffi_version < LooseVersion(MINIMUM_CFFI_VERSION):
-        print('CFFI 1.11 or newer required (%s found); '
-              'not building CFFI backend' % cffi_version,
-              file=sys.stderr)
+        print(
+            "CFFI 1.11 or newer required (%s found); "
+            "not building CFFI backend" % cffi_version,
+            file=sys.stderr,
+        )
         cffi = None
 
 except ImportError:
@@ -40,73 +42,77 @@
 SYSTEM_ZSTD = False
 WARNINGS_AS_ERRORS = False
 
-if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''):
+if os.environ.get("ZSTD_WARNINGS_AS_ERRORS", ""):
     WARNINGS_AS_ERRORS = True
 
-if '--legacy' in sys.argv:
+if "--legacy" in sys.argv:
     SUPPORT_LEGACY = True
-    sys.argv.remove('--legacy')
+    sys.argv.remove("--legacy")
 
-if '--system-zstd' in sys.argv:
+if "--system-zstd" in sys.argv:
     SYSTEM_ZSTD = True
-    sys.argv.remove('--system-zstd')
+    sys.argv.remove("--system-zstd")
 
-if '--warnings-as-errors' in sys.argv:
+if "--warnings-as-errors" in sys.argv:
     WARNINGS_AS_ERRORS = True
-    sys.argv.remove('--warning-as-errors')
+    sys.argv.remove("--warning-as-errors")
 
 # Code for obtaining the Extension instance is in its own module to
 # facilitate reuse in other projects.
 extensions = [
-    setup_zstd.get_c_extension(name='zstd',
-                               support_legacy=SUPPORT_LEGACY,
-                               system_zstd=SYSTEM_ZSTD,
-                               warnings_as_errors=WARNINGS_AS_ERRORS),
+    setup_zstd.get_c_extension(
+        name="zstd",
+        support_legacy=SUPPORT_LEGACY,
+        system_zstd=SYSTEM_ZSTD,
+        warnings_as_errors=WARNINGS_AS_ERRORS,
+    ),
 ]
 
 install_requires = []
 
 if cffi:
     import make_cffi
+
     extensions.append(make_cffi.ffi.distutils_extension())
-    install_requires.append('cffi>=%s' % MINIMUM_CFFI_VERSION)
+    install_requires.append("cffi>=%s" % MINIMUM_CFFI_VERSION)
 
 version = None
 
-with open('c-ext/python-zstandard.h', 'r') as fh:
+with open("c-ext/python-zstandard.h", "r") as fh:
     for line in fh:
-        if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'):
+        if not line.startswith("#define PYTHON_ZSTANDARD_VERSION"):
             continue
 
         version = line.split()[2][1:-1]
         break
 
 if not version:
-    raise Exception('could not resolve package version; '
-                    'this should never happen')
+    raise Exception("could not resolve package version; " "this should never happen")
 
 setup(
-    name='zstandard',
+    name="zstandard",
     version=version,
-    description='Zstandard bindings for Python',
-    long_description=open('README.rst', 'r').read(),
-    url='https://github.com/indygreg/python-zstandard',
-    author='Gregory Szorc',
-    author_email='gregory.szorc@gmail.com',
-    license='BSD',
+    description="Zstandard bindings for Python",
+    long_description=open("README.rst", "r").read(),
+    url="https://github.com/indygreg/python-zstandard",
+    author="Gregory Szorc",
+    author_email="gregory.szorc@gmail.com",
+    license="BSD",
     classifiers=[
-        'Development Status :: 4 - Beta',
-        'Intended Audience :: Developers',
-        'License :: OSI Approved :: BSD License',
-        'Programming Language :: C',
-        'Programming Language :: Python :: 2.7',
-        'Programming Language :: Python :: 3.5',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
+        "Development Status :: 4 - Beta",
+        "Intended Audience :: Developers",
+        "License :: OSI Approved :: BSD License",
+        "Programming Language :: C",
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
     ],
-    keywords='zstandard zstd compression',
-    packages=['zstandard'],
+    keywords="zstandard zstd compression",
+    packages=["zstandard"],
     ext_modules=extensions,
-    test_suite='tests',
+    test_suite="tests",
     install_requires=install_requires,
+    tests_require=["hypothesis"],
 )
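The version bump in python-zstandard.h above (0.12.0 to 0.13.0) is what drives this package's version: setup.py scrapes the #define rather than duplicating the string. A sketch of that handshake, matching the loop shown in this hunk:

    version = None
    with open("c-ext/python-zstandard.h", "r") as fh:
        for line in fh:
            if line.startswith("#define PYTHON_ZSTANDARD_VERSION"):
                version = line.split()[2][1:-1]   # '"0.13.0"' -> '0.13.0'
                break

As the header's own comment notes, zstandard/__init__ carries the same string and must be updated in step.
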
--- a/contrib/python-zstandard/setup_zstd.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/setup_zstd.py	Tue Jan 21 13:14:51 2020 -0500
@@ -10,97 +10,110 @@
 from distutils.extension import Extension
 
 
-zstd_sources = ['zstd/%s' % p for p in (
-    'common/debug.c',
-    'common/entropy_common.c',
-    'common/error_private.c',
-    'common/fse_decompress.c',
-    'common/pool.c',
-    'common/threading.c',
-    'common/xxhash.c',
-    'common/zstd_common.c',
-    'compress/fse_compress.c',
-    'compress/hist.c',
-    'compress/huf_compress.c',
-    'compress/zstd_compress_literals.c',
-    'compress/zstd_compress_sequences.c',
-    'compress/zstd_compress.c',
-    'compress/zstd_double_fast.c',
-    'compress/zstd_fast.c',
-    'compress/zstd_lazy.c',
-    'compress/zstd_ldm.c',
-    'compress/zstd_opt.c',
-    'compress/zstdmt_compress.c',
-    'decompress/huf_decompress.c',
-    'decompress/zstd_ddict.c',
-    'decompress/zstd_decompress.c',
-    'decompress/zstd_decompress_block.c',
-    'dictBuilder/cover.c',
-    'dictBuilder/divsufsort.c',
-    'dictBuilder/fastcover.c',
-    'dictBuilder/zdict.c',
-)]
+zstd_sources = [
+    "zstd/%s" % p
+    for p in (
+        "common/debug.c",
+        "common/entropy_common.c",
+        "common/error_private.c",
+        "common/fse_decompress.c",
+        "common/pool.c",
+        "common/threading.c",
+        "common/xxhash.c",
+        "common/zstd_common.c",
+        "compress/fse_compress.c",
+        "compress/hist.c",
+        "compress/huf_compress.c",
+        "compress/zstd_compress_literals.c",
+        "compress/zstd_compress_sequences.c",
+        "compress/zstd_compress.c",
+        "compress/zstd_double_fast.c",
+        "compress/zstd_fast.c",
+        "compress/zstd_lazy.c",
+        "compress/zstd_ldm.c",
+        "compress/zstd_opt.c",
+        "compress/zstdmt_compress.c",
+        "decompress/huf_decompress.c",
+        "decompress/zstd_ddict.c",
+        "decompress/zstd_decompress.c",
+        "decompress/zstd_decompress_block.c",
+        "dictBuilder/cover.c",
+        "dictBuilder/divsufsort.c",
+        "dictBuilder/fastcover.c",
+        "dictBuilder/zdict.c",
+    )
+]
 
-zstd_sources_legacy = ['zstd/%s' % p for p in (
-    'deprecated/zbuff_common.c',
-    'deprecated/zbuff_compress.c',
-    'deprecated/zbuff_decompress.c',
-    'legacy/zstd_v01.c',
-    'legacy/zstd_v02.c',
-    'legacy/zstd_v03.c',
-    'legacy/zstd_v04.c',
-    'legacy/zstd_v05.c',
-    'legacy/zstd_v06.c',
-    'legacy/zstd_v07.c'
-)]
+zstd_sources_legacy = [
+    "zstd/%s" % p
+    for p in (
+        "deprecated/zbuff_common.c",
+        "deprecated/zbuff_compress.c",
+        "deprecated/zbuff_decompress.c",
+        "legacy/zstd_v01.c",
+        "legacy/zstd_v02.c",
+        "legacy/zstd_v03.c",
+        "legacy/zstd_v04.c",
+        "legacy/zstd_v05.c",
+        "legacy/zstd_v06.c",
+        "legacy/zstd_v07.c",
+    )
+]
 
 zstd_includes = [
-    'zstd',
-    'zstd/common',
-    'zstd/compress',
-    'zstd/decompress',
-    'zstd/dictBuilder',
+    "zstd",
+    "zstd/common",
+    "zstd/compress",
+    "zstd/decompress",
+    "zstd/dictBuilder",
 ]
 
 zstd_includes_legacy = [
-    'zstd/deprecated',
-    'zstd/legacy',
+    "zstd/deprecated",
+    "zstd/legacy",
 ]
 
 ext_includes = [
-    'c-ext',
-    'zstd/common',
+    "c-ext",
+    "zstd/common",
 ]
 
 ext_sources = [
-    'zstd/common/pool.c',
-    'zstd/common/threading.c',
-    'zstd.c',
-    'c-ext/bufferutil.c',
-    'c-ext/compressiondict.c',
-    'c-ext/compressobj.c',
-    'c-ext/compressor.c',
-    'c-ext/compressoriterator.c',
-    'c-ext/compressionchunker.c',
-    'c-ext/compressionparams.c',
-    'c-ext/compressionreader.c',
-    'c-ext/compressionwriter.c',
-    'c-ext/constants.c',
-    'c-ext/decompressobj.c',
-    'c-ext/decompressor.c',
-    'c-ext/decompressoriterator.c',
-    'c-ext/decompressionreader.c',
-    'c-ext/decompressionwriter.c',
-    'c-ext/frameparams.c',
+    "zstd/common/error_private.c",
+    "zstd/common/pool.c",
+    "zstd/common/threading.c",
+    "zstd/common/zstd_common.c",
+    "zstd.c",
+    "c-ext/bufferutil.c",
+    "c-ext/compressiondict.c",
+    "c-ext/compressobj.c",
+    "c-ext/compressor.c",
+    "c-ext/compressoriterator.c",
+    "c-ext/compressionchunker.c",
+    "c-ext/compressionparams.c",
+    "c-ext/compressionreader.c",
+    "c-ext/compressionwriter.c",
+    "c-ext/constants.c",
+    "c-ext/decompressobj.c",
+    "c-ext/decompressor.c",
+    "c-ext/decompressoriterator.c",
+    "c-ext/decompressionreader.c",
+    "c-ext/decompressionwriter.c",
+    "c-ext/frameparams.c",
 ]
 
 zstd_depends = [
-    'c-ext/python-zstandard.h',
+    "c-ext/python-zstandard.h",
 ]
 
 
-def get_c_extension(support_legacy=False, system_zstd=False, name='zstd',
-                    warnings_as_errors=False, root=None):
+def get_c_extension(
+    support_legacy=False,
+    system_zstd=False,
+    name="zstd",
+    warnings_as_errors=False,
+    root=None,
+):
     """Obtain a distutils.extension.Extension for the C extension.
 
     ``support_legacy`` controls whether to compile in legacy zstd format support.
@@ -125,17 +138,16 @@
     if not system_zstd:
         sources.update([os.path.join(actual_root, p) for p in zstd_sources])
         if support_legacy:
-            sources.update([os.path.join(actual_root, p)
-                            for p in zstd_sources_legacy])
+            sources.update([os.path.join(actual_root, p) for p in zstd_sources_legacy])
     sources = list(sources)
 
     include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
     if not system_zstd:
-        include_dirs.update([os.path.join(actual_root, d)
-                             for d in zstd_includes])
+        include_dirs.update([os.path.join(actual_root, d) for d in zstd_includes])
         if support_legacy:
-            include_dirs.update([os.path.join(actual_root, d)
-                                 for d in zstd_includes_legacy])
+            include_dirs.update(
+                [os.path.join(actual_root, d) for d in zstd_includes_legacy]
+            )
     include_dirs = list(include_dirs)
 
     depends = [os.path.join(actual_root, p) for p in zstd_depends]
@@ -143,41 +155,40 @@
     compiler = distutils.ccompiler.new_compiler()
 
     # Needed for MSVC.
-    if hasattr(compiler, 'initialize'):
+    if hasattr(compiler, "initialize"):
         compiler.initialize()
 
-    if compiler.compiler_type == 'unix':
-        compiler_type = 'unix'
-    elif compiler.compiler_type == 'msvc':
-        compiler_type = 'msvc'
-    elif compiler.compiler_type == 'mingw32':
-        compiler_type = 'mingw32'
+    if compiler.compiler_type == "unix":
+        compiler_type = "unix"
+    elif compiler.compiler_type == "msvc":
+        compiler_type = "msvc"
+    elif compiler.compiler_type == "mingw32":
+        compiler_type = "mingw32"
     else:
-        raise Exception('unhandled compiler type: %s' %
-                        compiler.compiler_type)
+        raise Exception("unhandled compiler type: %s" % compiler.compiler_type)
 
-    extra_args = ['-DZSTD_MULTITHREAD']
+    extra_args = ["-DZSTD_MULTITHREAD"]
 
     if not system_zstd:
-        extra_args.append('-DZSTDLIB_VISIBILITY=')
-        extra_args.append('-DZDICTLIB_VISIBILITY=')
-        extra_args.append('-DZSTDERRORLIB_VISIBILITY=')
+        extra_args.append("-DZSTDLIB_VISIBILITY=")
+        extra_args.append("-DZDICTLIB_VISIBILITY=")
+        extra_args.append("-DZSTDERRORLIB_VISIBILITY=")
 
-        if compiler_type == 'unix':
-            extra_args.append('-fvisibility=hidden')
+        if compiler_type == "unix":
+            extra_args.append("-fvisibility=hidden")
 
     if not system_zstd and support_legacy:
-        extra_args.append('-DZSTD_LEGACY_SUPPORT=1')
+        extra_args.append("-DZSTD_LEGACY_SUPPORT=1")
 
     if warnings_as_errors:
-        if compiler_type in ('unix', 'mingw32'):
-            extra_args.append('-Werror')
-        elif compiler_type == 'msvc':
-            extra_args.append('/WX')
+        if compiler_type in ("unix", "mingw32"):
+            extra_args.append("-Werror")
+        elif compiler_type == "msvc":
+            extra_args.append("/WX")
         else:
             assert False
 
-    libraries = ['zstd'] if system_zstd else []
+    libraries = ["zstd"] if system_zstd else []
 
     # Python 3.7 doesn't like absolute paths. So normalize to relative.
     sources = [os.path.relpath(p, root) for p in sources]
@@ -185,8 +196,11 @@
     depends = [os.path.relpath(p, root) for p in depends]
 
     # TODO compile with optimizations.
-    return Extension(name, sources,
-                     include_dirs=include_dirs,
-                     depends=depends,
-                     extra_compile_args=extra_args,
-                     libraries=libraries)
+    return Extension(
+        name,
+        sources,
+        include_dirs=include_dirs,
+        depends=depends,
+        extra_compile_args=extra_args,
+        libraries=libraries,
+    )
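
For reference, a minimal sketch (not part of the changeset) of how the
reformatted get_c_extension() can be invoked from a setup.py; the
`setup_zstd` import name and the keyword values are illustrative
assumptions, not something this diff prescribes:

    import os

    import setup_zstd

    ext = setup_zstd.get_c_extension(
        support_legacy=True,        # compile in legacy zstd format support
        system_zstd=False,          # build against the bundled zstd sources
        name="zstd",
        warnings_as_errors=False,
        root=os.path.dirname(os.path.abspath(__file__)),
    )
    # `ext` is a distutils Extension, suitable for setup(ext_modules=[ext]).
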
--- a/contrib/python-zstandard/tests/common.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/common.py	Tue Jan 21 13:14:51 2020 -0500
@@ -3,6 +3,7 @@
 import io
 import os
 import types
+import unittest
 
 try:
     import hypothesis
@@ -10,39 +11,46 @@
     hypothesis = None
 
 
+class TestCase(unittest.TestCase):
+    if not getattr(unittest.TestCase, "assertRaisesRegex", False):
+        assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
+
+
 def make_cffi(cls):
     """Decorator to add CFFI versions of each test method."""
 
     # The module containing this class definition should
     # `import zstandard as zstd`. Otherwise things may blow up.
     mod = inspect.getmodule(cls)
-    if not hasattr(mod, 'zstd'):
+    if not hasattr(mod, "zstd"):
         raise Exception('test module does not contain "zstd" symbol')
 
-    if not hasattr(mod.zstd, 'backend'):
-        raise Exception('zstd symbol does not have "backend" attribute; did '
-                        'you `import zstandard as zstd`?')
+    if not hasattr(mod.zstd, "backend"):
+        raise Exception(
+            'zstd symbol does not have "backend" attribute; did '
+            "you `import zstandard as zstd`?"
+        )
 
     # If `import zstandard` already chose the cffi backend, there is nothing
     # for us to do: we only add the cffi variation if the default backend
     # is the C extension.
-    if mod.zstd.backend == 'cffi':
+    if mod.zstd.backend == "cffi":
         return cls
 
     old_env = dict(os.environ)
-    os.environ['PYTHON_ZSTANDARD_IMPORT_POLICY'] = 'cffi'
+    os.environ["PYTHON_ZSTANDARD_IMPORT_POLICY"] = "cffi"
     try:
         try:
-            mod_info = imp.find_module('zstandard')
-            mod = imp.load_module('zstandard_cffi', *mod_info)
+            mod_info = imp.find_module("zstandard")
+            mod = imp.load_module("zstandard_cffi", *mod_info)
         except ImportError:
             return cls
     finally:
         os.environ.clear()
         os.environ.update(old_env)
 
-    if mod.backend != 'cffi':
-        raise Exception('got the zstandard %s backend instead of cffi' % mod.backend)
+    if mod.backend != "cffi":
+        raise Exception("got the zstandard %s backend instead of cffi" % mod.backend)
 
     # If CFFI version is available, dynamically construct test methods
     # that use it.
@@ -52,27 +60,31 @@
         if not inspect.ismethod(fn) and not inspect.isfunction(fn):
             continue
 
-        if not fn.__name__.startswith('test_'):
+        if not fn.__name__.startswith("test_"):
             continue
 
-        name = '%s_cffi' % fn.__name__
+        name = "%s_cffi" % fn.__name__
 
         # Replace the "zstd" symbol with the CFFI module instance. Then copy
         # the function object and install it in a new attribute.
         if isinstance(fn, types.FunctionType):
             globs = dict(fn.__globals__)
-            globs['zstd'] = mod
-            new_fn = types.FunctionType(fn.__code__, globs, name,
-                                        fn.__defaults__, fn.__closure__)
+            globs["zstd"] = mod
+            new_fn = types.FunctionType(
+                fn.__code__, globs, name, fn.__defaults__, fn.__closure__
+            )
             new_method = new_fn
         else:
             globs = dict(fn.__func__.func_globals)
-            globs['zstd'] = mod
-            new_fn = types.FunctionType(fn.__func__.func_code, globs, name,
-                                        fn.__func__.func_defaults,
-                                        fn.__func__.func_closure)
-            new_method = types.UnboundMethodType(new_fn, fn.im_self,
-                                                 fn.im_class)
+            globs["zstd"] = mod
+            new_fn = types.FunctionType(
+                fn.__func__.func_code,
+                globs,
+                name,
+                fn.__func__.func_defaults,
+                fn.__func__.func_closure,
+            )
+            new_method = types.UnboundMethodType(new_fn, fn.im_self, fn.im_class)
 
         setattr(cls, name, new_method)
 
@@ -84,6 +96,7 @@
 
     This allows us to access written data after close().
     """
+
     def __init__(self, *args, **kwargs):
         super(NonClosingBytesIO, self).__init__(*args, **kwargs)
         self._saved_buffer = None
@@ -135,7 +148,7 @@
         dirs[:] = list(sorted(dirs))
         for f in sorted(files):
             try:
-                with open(os.path.join(root, f), 'rb') as fh:
+                with open(os.path.join(root, f), "rb") as fh:
                     data = fh.read()
                     if data:
                         _source_files.append(data)
@@ -154,11 +167,11 @@
 
 def generate_samples():
     inputs = [
-        b'foo',
-        b'bar',
-        b'abcdef',
-        b'sometext',
-        b'baz',
+        b"foo",
+        b"bar",
+        b"abcdef",
+        b"sometext",
+        b"baz",
     ]
 
     samples = []
@@ -173,13 +186,12 @@
 
 if hypothesis:
     default_settings = hypothesis.settings(deadline=10000)
-    hypothesis.settings.register_profile('default', default_settings)
+    hypothesis.settings.register_profile("default", default_settings)
 
     ci_settings = hypothesis.settings(deadline=20000, max_examples=1000)
-    hypothesis.settings.register_profile('ci', ci_settings)
+    hypothesis.settings.register_profile("ci", ci_settings)
 
     expensive_settings = hypothesis.settings(deadline=None, max_examples=10000)
-    hypothesis.settings.register_profile('expensive', expensive_settings)
+    hypothesis.settings.register_profile("expensive", expensive_settings)
 
-    hypothesis.settings.load_profile(
-        os.environ.get('HYPOTHESIS_PROFILE', 'default'))
+    hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))
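
A sketch (assumed usage, not part of the changeset) of how a test module
combines the new TestCase shim with @make_cffi: the shim gives Python 2 the
Python 3 assertRaisesRegex spelling, and the decorator clones each test_*
method against the CFFI backend when it is available. The test body below is
illustrative:

    import zstandard as zstd  # make_cffi requires this exact module binding

    from .common import make_cffi, TestCase


    @make_cffi
    class TestRoundTrip(TestCase):
        # make_cffi also generates test_roundtrip_cffi, rebinding the
        # module-level `zstd` name to the CFFI backend.
        def test_roundtrip(self):
            cctx = zstd.ZstdCompressor(level=3)
            dctx = zstd.ZstdDecompressor()
            data = b"hello" * 64
            self.assertEqual(dctx.decompress(cctx.compress(data)), data)
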
--- a/contrib/python-zstandard/tests/test_buffer_util.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_buffer_util.py	Tue Jan 21 13:14:51 2020 -0500
@@ -3,104 +3,114 @@
 
 import zstandard as zstd
 
-ss = struct.Struct('=QQ')
+from .common import TestCase
+
+ss = struct.Struct("=QQ")
 
 
-class TestBufferWithSegments(unittest.TestCase):
+class TestBufferWithSegments(TestCase):
     def test_arguments(self):
-        if not hasattr(zstd, 'BufferWithSegments'):
-            self.skipTest('BufferWithSegments not available')
+        if not hasattr(zstd, "BufferWithSegments"):
+            self.skipTest("BufferWithSegments not available")
 
         with self.assertRaises(TypeError):
             zstd.BufferWithSegments()
 
         with self.assertRaises(TypeError):
-            zstd.BufferWithSegments(b'foo')
+            zstd.BufferWithSegments(b"foo")
 
         # Segments data should be a multiple of 16.
-        with self.assertRaisesRegexp(ValueError, 'segments array size is not a multiple of 16'):
-            zstd.BufferWithSegments(b'foo', b'\x00\x00')
+        with self.assertRaisesRegex(
+            ValueError, "segments array size is not a multiple of 16"
+        ):
+            zstd.BufferWithSegments(b"foo", b"\x00\x00")
 
     def test_invalid_offset(self):
-        if not hasattr(zstd, 'BufferWithSegments'):
-            self.skipTest('BufferWithSegments not available')
+        if not hasattr(zstd, "BufferWithSegments"):
+            self.skipTest("BufferWithSegments not available")
 
-        with self.assertRaisesRegexp(ValueError, 'offset within segments array references memory'):
-            zstd.BufferWithSegments(b'foo', ss.pack(0, 4))
+        with self.assertRaisesRegex(
+            ValueError, "offset within segments array references memory"
+        ):
+            zstd.BufferWithSegments(b"foo", ss.pack(0, 4))
 
     def test_invalid_getitem(self):
-        if not hasattr(zstd, 'BufferWithSegments'):
-            self.skipTest('BufferWithSegments not available')
+        if not hasattr(zstd, "BufferWithSegments"):
+            self.skipTest("BufferWithSegments not available")
 
-        b = zstd.BufferWithSegments(b'foo', ss.pack(0, 3))
+        b = zstd.BufferWithSegments(b"foo", ss.pack(0, 3))
 
-        with self.assertRaisesRegexp(IndexError, 'offset must be non-negative'):
+        with self.assertRaisesRegex(IndexError, "offset must be non-negative"):
             test = b[-10]
 
-        with self.assertRaisesRegexp(IndexError, 'offset must be less than 1'):
+        with self.assertRaisesRegex(IndexError, "offset must be less than 1"):
             test = b[1]
 
-        with self.assertRaisesRegexp(IndexError, 'offset must be less than 1'):
+        with self.assertRaisesRegex(IndexError, "offset must be less than 1"):
             test = b[2]
 
     def test_single(self):
-        if not hasattr(zstd, 'BufferWithSegments'):
-            self.skipTest('BufferWithSegments not available')
+        if not hasattr(zstd, "BufferWithSegments"):
+            self.skipTest("BufferWithSegments not available")
 
-        b = zstd.BufferWithSegments(b'foo', ss.pack(0, 3))
+        b = zstd.BufferWithSegments(b"foo", ss.pack(0, 3))
         self.assertEqual(len(b), 1)
         self.assertEqual(b.size, 3)
-        self.assertEqual(b.tobytes(), b'foo')
+        self.assertEqual(b.tobytes(), b"foo")
 
         self.assertEqual(len(b[0]), 3)
         self.assertEqual(b[0].offset, 0)
-        self.assertEqual(b[0].tobytes(), b'foo')
+        self.assertEqual(b[0].tobytes(), b"foo")
 
     def test_multiple(self):
-        if not hasattr(zstd, 'BufferWithSegments'):
-            self.skipTest('BufferWithSegments not available')
+        if not hasattr(zstd, "BufferWithSegments"):
+            self.skipTest("BufferWithSegments not available")
 
-        b = zstd.BufferWithSegments(b'foofooxfooxy', b''.join([ss.pack(0, 3),
-                                                               ss.pack(3, 4),
-                                                               ss.pack(7, 5)]))
+        b = zstd.BufferWithSegments(
+            b"foofooxfooxy", b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)])
+        )
         self.assertEqual(len(b), 3)
         self.assertEqual(b.size, 12)
-        self.assertEqual(b.tobytes(), b'foofooxfooxy')
+        self.assertEqual(b.tobytes(), b"foofooxfooxy")
 
-        self.assertEqual(b[0].tobytes(), b'foo')
-        self.assertEqual(b[1].tobytes(), b'foox')
-        self.assertEqual(b[2].tobytes(), b'fooxy')
+        self.assertEqual(b[0].tobytes(), b"foo")
+        self.assertEqual(b[1].tobytes(), b"foox")
+        self.assertEqual(b[2].tobytes(), b"fooxy")
 
 
-class TestBufferWithSegmentsCollection(unittest.TestCase):
+class TestBufferWithSegmentsCollection(TestCase):
     def test_empty_constructor(self):
-        if not hasattr(zstd, 'BufferWithSegmentsCollection'):
-            self.skipTest('BufferWithSegmentsCollection not available')
+        if not hasattr(zstd, "BufferWithSegmentsCollection"):
+            self.skipTest("BufferWithSegmentsCollection not available")
 
-        with self.assertRaisesRegexp(ValueError, 'must pass at least 1 argument'):
+        with self.assertRaisesRegex(ValueError, "must pass at least 1 argument"):
             zstd.BufferWithSegmentsCollection()
 
     def test_argument_validation(self):
-        if not hasattr(zstd, 'BufferWithSegmentsCollection'):
-            self.skipTest('BufferWithSegmentsCollection not available')
+        if not hasattr(zstd, "BufferWithSegmentsCollection"):
+            self.skipTest("BufferWithSegmentsCollection not available")
 
-        with self.assertRaisesRegexp(TypeError, 'arguments must be BufferWithSegments'):
+        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
             zstd.BufferWithSegmentsCollection(None)
 
-        with self.assertRaisesRegexp(TypeError, 'arguments must be BufferWithSegments'):
-            zstd.BufferWithSegmentsCollection(zstd.BufferWithSegments(b'foo', ss.pack(0, 3)),
-                                              None)
+        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+            zstd.BufferWithSegmentsCollection(
+                zstd.BufferWithSegments(b"foo", ss.pack(0, 3)), None
+            )
 
-        with self.assertRaisesRegexp(ValueError, 'ZstdBufferWithSegments cannot be empty'):
-            zstd.BufferWithSegmentsCollection(zstd.BufferWithSegments(b'', b''))
+        with self.assertRaisesRegex(
+            ValueError, "ZstdBufferWithSegments cannot be empty"
+        ):
+            zstd.BufferWithSegmentsCollection(zstd.BufferWithSegments(b"", b""))
 
     def test_length(self):
-        if not hasattr(zstd, 'BufferWithSegmentsCollection'):
-            self.skipTest('BufferWithSegmentsCollection not available')
+        if not hasattr(zstd, "BufferWithSegmentsCollection"):
+            self.skipTest("BufferWithSegmentsCollection not available")
 
-        b1 = zstd.BufferWithSegments(b'foo', ss.pack(0, 3))
-        b2 = zstd.BufferWithSegments(b'barbaz', b''.join([ss.pack(0, 3),
-                                                          ss.pack(3, 3)]))
+        b1 = zstd.BufferWithSegments(b"foo", ss.pack(0, 3))
+        b2 = zstd.BufferWithSegments(
+            b"barbaz", b"".join([ss.pack(0, 3), ss.pack(3, 3)])
+        )
 
         c = zstd.BufferWithSegmentsCollection(b1)
         self.assertEqual(len(c), 1)
@@ -115,21 +125,22 @@
         self.assertEqual(c.size(), 9)
 
     def test_getitem(self):
-        if not hasattr(zstd, 'BufferWithSegmentsCollection'):
-            self.skipTest('BufferWithSegmentsCollection not available')
+        if not hasattr(zstd, "BufferWithSegmentsCollection"):
+            self.skipTest("BufferWithSegmentsCollection not available")
 
-        b1 = zstd.BufferWithSegments(b'foo', ss.pack(0, 3))
-        b2 = zstd.BufferWithSegments(b'barbaz', b''.join([ss.pack(0, 3),
-                                                          ss.pack(3, 3)]))
+        b1 = zstd.BufferWithSegments(b"foo", ss.pack(0, 3))
+        b2 = zstd.BufferWithSegments(
+            b"barbaz", b"".join([ss.pack(0, 3), ss.pack(3, 3)])
+        )
 
         c = zstd.BufferWithSegmentsCollection(b1, b2)
 
-        with self.assertRaisesRegexp(IndexError, 'offset must be less than 3'):
+        with self.assertRaisesRegex(IndexError, "offset must be less than 3"):
             c[3]
 
-        with self.assertRaisesRegexp(IndexError, 'offset must be less than 3'):
+        with self.assertRaisesRegex(IndexError, "offset must be less than 3"):
             c[4]
 
-        self.assertEqual(c[0].tobytes(), b'foo')
-        self.assertEqual(c[1].tobytes(), b'bar')
-        self.assertEqual(c[2].tobytes(), b'baz')
+        self.assertEqual(c[0].tobytes(), b"foo")
+        self.assertEqual(c[1].tobytes(), b"bar")
+        self.assertEqual(c[2].tobytes(), b"baz")
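
As context for the segment packing used in these tests: each segment is a
pair of native-endian unsigned 64-bit integers (offset, length), 16 bytes
per segment, which is why the "=QQ" struct appears throughout. A hedged
sketch, assuming a backend that exposes BufferWithSegments:

    import struct

    import zstandard as zstd

    ss = struct.Struct("=QQ")  # (offset, length) pairs, 16 bytes each

    if hasattr(zstd, "BufferWithSegments"):
        data = b"foofooxfooxy"
        segments = b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)])
        buf = zstd.BufferWithSegments(data, segments)
        assert [buf[i].tobytes() for i in range(len(buf))] == [
            b"foo",
            b"foox",
            b"fooxy",
        ]
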
--- a/contrib/python-zstandard/tests/test_compressor.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_compressor.py	Tue Jan 21 13:14:51 2020 -0500
@@ -13,6 +13,7 @@
     make_cffi,
     NonClosingBytesIO,
     OpCountingBytesIO,
+    TestCase,
 )
 
 
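For orientation on the helper reformatted in the hunk above (assumed
semantics, not part of the changeset): ZstdCompressionParameters.from_level()
resolves a compression level, plus hints such as source_size, into concrete
parameters, and window_log is the log2 of the match-finding window, so
multithreaded_chunk_size() sizes chunks at four times the window:

    import zstandard as zstd

    params = zstd.ZstdCompressionParameters.from_level(3, source_size=1048576)
    chunk_size = 1 << (params.window_log + 2)  # mirrors multithreaded_chunk_size()
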
@@ -23,14 +24,13 @@
 
 
 def multithreaded_chunk_size(level, source_size=0):
-    params = zstd.ZstdCompressionParameters.from_level(level,
-                                                       source_size=source_size)
+    params = zstd.ZstdCompressionParameters.from_level(level, source_size=source_size)
 
     return 1 << (params.window_log + 2)
 
 
 @make_cffi
-class TestCompressor(unittest.TestCase):
+class TestCompressor(TestCase):
     def test_level_bounds(self):
         with self.assertRaises(ValueError):
             zstd.ZstdCompressor(level=23)
@@ -41,11 +41,11 @@
 
 
 @make_cffi
-class TestCompressor_compress(unittest.TestCase):
+class TestCompressor_compress(TestCase):
     def test_compress_empty(self):
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        result = cctx.compress(b'')
-        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        result = cctx.compress(b"")
+        self.assertEqual(result, b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
         params = zstd.get_frame_parameters(result)
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
         self.assertEqual(params.window_size, 524288)
@@ -53,21 +53,21 @@
         self.assertFalse(params.has_checksum, 0)
 
         cctx = zstd.ZstdCompressor()
-        result = cctx.compress(b'')
-        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x20\x00\x01\x00\x00')
+        result = cctx.compress(b"")
+        self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x00\x01\x00\x00")
         params = zstd.get_frame_parameters(result)
         self.assertEqual(params.content_size, 0)
 
     def test_input_types(self):
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        expected = b'\x28\xb5\x2f\xfd\x00\x00\x19\x00\x00\x66\x6f\x6f'
+        expected = b"\x28\xb5\x2f\xfd\x00\x00\x19\x00\x00\x66\x6f\x6f"
 
         mutable_array = bytearray(3)
-        mutable_array[:] = b'foo'
+        mutable_array[:] = b"foo"
 
         sources = [
-            memoryview(b'foo'),
-            bytearray(b'foo'),
+            memoryview(b"foo"),
+            bytearray(b"foo"),
             mutable_array,
         ]
 
@@ -77,43 +77,46 @@
     def test_compress_large(self):
         chunks = []
         for i in range(255):
-            chunks.append(struct.Struct('>B').pack(i) * 16384)
+            chunks.append(struct.Struct(">B").pack(i) * 16384)
 
         cctx = zstd.ZstdCompressor(level=3, write_content_size=False)
-        result = cctx.compress(b''.join(chunks))
+        result = cctx.compress(b"".join(chunks))
         self.assertEqual(len(result), 999)
-        self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd')
+        self.assertEqual(result[0:4], b"\x28\xb5\x2f\xfd")
 
         # This matches the test for read_to_iter() below.
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        result = cctx.compress(b'f' * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b'o')
-        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00'
-                                 b'\x10\x66\x66\x01\x00\xfb\xff\x39\xc0'
-                                 b'\x02\x09\x00\x00\x6f')
+        result = cctx.compress(b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o")
+        self.assertEqual(
+            result,
+            b"\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00"
+            b"\x10\x66\x66\x01\x00\xfb\xff\x39\xc0"
+            b"\x02\x09\x00\x00\x6f",
+        )
 
     def test_negative_level(self):
         cctx = zstd.ZstdCompressor(level=-4)
-        result = cctx.compress(b'foo' * 256)
+        result = cctx.compress(b"foo" * 256)
 
     def test_no_magic(self):
-        params = zstd.ZstdCompressionParameters.from_level(
-            1, format=zstd.FORMAT_ZSTD1)
+        params = zstd.ZstdCompressionParameters.from_level(1, format=zstd.FORMAT_ZSTD1)
         cctx = zstd.ZstdCompressor(compression_params=params)
-        magic = cctx.compress(b'foobar')
+        magic = cctx.compress(b"foobar")
 
         params = zstd.ZstdCompressionParameters.from_level(
-            1, format=zstd.FORMAT_ZSTD1_MAGICLESS)
+            1, format=zstd.FORMAT_ZSTD1_MAGICLESS
+        )
         cctx = zstd.ZstdCompressor(compression_params=params)
-        no_magic = cctx.compress(b'foobar')
+        no_magic = cctx.compress(b"foobar")
 
-        self.assertEqual(magic[0:4], b'\x28\xb5\x2f\xfd')
+        self.assertEqual(magic[0:4], b"\x28\xb5\x2f\xfd")
         self.assertEqual(magic[4:], no_magic)
 
     def test_write_checksum(self):
         cctx = zstd.ZstdCompressor(level=1)
-        no_checksum = cctx.compress(b'foobar')
+        no_checksum = cctx.compress(b"foobar")
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
-        with_checksum = cctx.compress(b'foobar')
+        with_checksum = cctx.compress(b"foobar")
 
         self.assertEqual(len(with_checksum), len(no_checksum) + 4)
 
@@ -125,9 +128,9 @@
 
     def test_write_content_size(self):
         cctx = zstd.ZstdCompressor(level=1)
-        with_size = cctx.compress(b'foobar' * 256)
+        with_size = cctx.compress(b"foobar" * 256)
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        no_size = cctx.compress(b'foobar' * 256)
+        no_size = cctx.compress(b"foobar" * 256)
 
         self.assertEqual(len(with_size), len(no_size) + 1)
 
@@ -139,17 +142,17 @@
     def test_no_dict_id(self):
         samples = []
         for i in range(128):
-            samples.append(b'foo' * 64)
-            samples.append(b'bar' * 64)
-            samples.append(b'foobar' * 64)
+            samples.append(b"foo" * 64)
+            samples.append(b"bar" * 64)
+            samples.append(b"foobar" * 64)
 
         d = zstd.train_dictionary(1024, samples)
 
         cctx = zstd.ZstdCompressor(level=1, dict_data=d)
-        with_dict_id = cctx.compress(b'foobarfoobar')
+        with_dict_id = cctx.compress(b"foobarfoobar")
 
         cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
-        no_dict_id = cctx.compress(b'foobarfoobar')
+        no_dict_id = cctx.compress(b"foobarfoobar")
 
         self.assertEqual(len(with_dict_id), len(no_dict_id) + 4)
 
@@ -161,23 +164,23 @@
     def test_compress_dict_multiple(self):
         samples = []
         for i in range(128):
-            samples.append(b'foo' * 64)
-            samples.append(b'bar' * 64)
-            samples.append(b'foobar' * 64)
+            samples.append(b"foo" * 64)
+            samples.append(b"bar" * 64)
+            samples.append(b"foobar" * 64)
 
         d = zstd.train_dictionary(8192, samples)
 
         cctx = zstd.ZstdCompressor(level=1, dict_data=d)
 
         for i in range(32):
-            cctx.compress(b'foo bar foobar foo bar foobar')
+            cctx.compress(b"foo bar foobar foo bar foobar")
 
     def test_dict_precompute(self):
         samples = []
         for i in range(128):
-            samples.append(b'foo' * 64)
-            samples.append(b'bar' * 64)
-            samples.append(b'foobar' * 64)
+            samples.append(b"foo" * 64)
+            samples.append(b"bar" * 64)
+            samples.append(b"foobar" * 64)
 
         d = zstd.train_dictionary(8192, samples)
         d.precompute_compress(level=1)
@@ -185,11 +188,11 @@
         cctx = zstd.ZstdCompressor(level=1, dict_data=d)
 
         for i in range(32):
-            cctx.compress(b'foo bar foobar foo bar foobar')
+            cctx.compress(b"foo bar foobar foo bar foobar")
 
     def test_multithreaded(self):
         chunk_size = multithreaded_chunk_size(1)
-        source = b''.join([b'x' * chunk_size, b'y' * chunk_size])
+        source = b"".join([b"x" * chunk_size, b"y" * chunk_size])
 
         cctx = zstd.ZstdCompressor(level=1, threads=2)
         compressed = cctx.compress(source)
@@ -205,73 +208,72 @@
     def test_multithreaded_dict(self):
         samples = []
         for i in range(128):
-            samples.append(b'foo' * 64)
-            samples.append(b'bar' * 64)
-            samples.append(b'foobar' * 64)
+            samples.append(b"foo" * 64)
+            samples.append(b"bar" * 64)
+            samples.append(b"foobar" * 64)
 
         d = zstd.train_dictionary(1024, samples)
 
         cctx = zstd.ZstdCompressor(dict_data=d, threads=2)
 
-        result = cctx.compress(b'foo')
-        params = zstd.get_frame_parameters(result);
-        self.assertEqual(params.content_size, 3);
+        result = cctx.compress(b"foo")
+        params = zstd.get_frame_parameters(result)
+        self.assertEqual(params.content_size, 3)
         self.assertEqual(params.dict_id, d.dict_id())
 
-        self.assertEqual(result,
-                         b'\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00'
-                         b'\x66\x6f\x6f')
+        self.assertEqual(
+            result,
+            b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00" b"\x66\x6f\x6f",
+        )
 
     def test_multithreaded_compression_params(self):
         params = zstd.ZstdCompressionParameters.from_level(0, threads=2)
         cctx = zstd.ZstdCompressor(compression_params=params)
 
-        result = cctx.compress(b'foo')
-        params = zstd.get_frame_parameters(result);
-        self.assertEqual(params.content_size, 3);
+        result = cctx.compress(b"foo")
+        params = zstd.get_frame_parameters(result)
+        self.assertEqual(params.content_size, 3)
 
-        self.assertEqual(result,
-                         b'\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f')
+        self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f")
 
 
 @make_cffi
-class TestCompressor_compressobj(unittest.TestCase):
+class TestCompressor_compressobj(TestCase):
     def test_compressobj_empty(self):
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
         cobj = cctx.compressobj()
-        self.assertEqual(cobj.compress(b''), b'')
-        self.assertEqual(cobj.flush(),
-                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        self.assertEqual(cobj.compress(b""), b"")
+        self.assertEqual(cobj.flush(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
 
     def test_input_types(self):
-        expected = b'\x28\xb5\x2f\xfd\x00\x48\x19\x00\x00\x66\x6f\x6f'
+        expected = b"\x28\xb5\x2f\xfd\x00\x48\x19\x00\x00\x66\x6f\x6f"
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
 
         mutable_array = bytearray(3)
-        mutable_array[:] = b'foo'
+        mutable_array[:] = b"foo"
 
         sources = [
-            memoryview(b'foo'),
-            bytearray(b'foo'),
+            memoryview(b"foo"),
+            bytearray(b"foo"),
             mutable_array,
         ]
 
         for source in sources:
             cobj = cctx.compressobj()
-            self.assertEqual(cobj.compress(source), b'')
+            self.assertEqual(cobj.compress(source), b"")
             self.assertEqual(cobj.flush(), expected)
 
     def test_compressobj_large(self):
         chunks = []
         for i in range(255):
-            chunks.append(struct.Struct('>B').pack(i) * 16384)
+            chunks.append(struct.Struct(">B").pack(i) * 16384)
 
         cctx = zstd.ZstdCompressor(level=3)
         cobj = cctx.compressobj()
 
-        result = cobj.compress(b''.join(chunks)) + cobj.flush()
+        result = cobj.compress(b"".join(chunks)) + cobj.flush()
         self.assertEqual(len(result), 999)
-        self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd')
+        self.assertEqual(result[0:4], b"\x28\xb5\x2f\xfd")
 
         params = zstd.get_frame_parameters(result)
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
@@ -282,10 +284,10 @@
     def test_write_checksum(self):
         cctx = zstd.ZstdCompressor(level=1)
         cobj = cctx.compressobj()
-        no_checksum = cobj.compress(b'foobar') + cobj.flush()
+        no_checksum = cobj.compress(b"foobar") + cobj.flush()
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
         cobj = cctx.compressobj()
-        with_checksum = cobj.compress(b'foobar') + cobj.flush()
+        with_checksum = cobj.compress(b"foobar") + cobj.flush()
 
         no_params = zstd.get_frame_parameters(no_checksum)
         with_params = zstd.get_frame_parameters(with_checksum)
@@ -300,11 +302,11 @@
 
     def test_write_content_size(self):
         cctx = zstd.ZstdCompressor(level=1)
-        cobj = cctx.compressobj(size=len(b'foobar' * 256))
-        with_size = cobj.compress(b'foobar' * 256) + cobj.flush()
+        cobj = cctx.compressobj(size=len(b"foobar" * 256))
+        with_size = cobj.compress(b"foobar" * 256) + cobj.flush()
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        cobj = cctx.compressobj(size=len(b'foobar' * 256))
-        no_size = cobj.compress(b'foobar' * 256) + cobj.flush()
+        cobj = cctx.compressobj(size=len(b"foobar" * 256))
+        no_size = cobj.compress(b"foobar" * 256) + cobj.flush()
 
         no_params = zstd.get_frame_parameters(no_size)
         with_params = zstd.get_frame_parameters(with_size)
@@ -321,48 +323,53 @@
         cctx = zstd.ZstdCompressor()
         cobj = cctx.compressobj()
 
-        cobj.compress(b'foo')
+        cobj.compress(b"foo")
         cobj.flush()
 
-        with self.assertRaisesRegexp(zstd.ZstdError, r'cannot call compress\(\) after compressor'):
-            cobj.compress(b'foo')
+        with self.assertRaisesRegex(
+            zstd.ZstdError, r"cannot call compress\(\) after compressor"
+        ):
+            cobj.compress(b"foo")
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'compressor object already finished'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "compressor object already finished"
+        ):
             cobj.flush()
 
     def test_flush_block_repeated(self):
         cctx = zstd.ZstdCompressor(level=1)
         cobj = cctx.compressobj()
 
-        self.assertEqual(cobj.compress(b'foo'), b'')
-        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK),
-                         b'\x28\xb5\x2f\xfd\x00\x48\x18\x00\x00foo')
-        self.assertEqual(cobj.compress(b'bar'), b'')
+        self.assertEqual(cobj.compress(b"foo"), b"")
+        self.assertEqual(
+            cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK),
+            b"\x28\xb5\x2f\xfd\x00\x48\x18\x00\x00foo",
+        )
+        self.assertEqual(cobj.compress(b"bar"), b"")
         # 3 byte header plus content.
-        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK),
-                         b'\x18\x00\x00bar')
-        self.assertEqual(cobj.flush(), b'\x01\x00\x00')
+        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar")
+        self.assertEqual(cobj.flush(), b"\x01\x00\x00")
 
     def test_flush_empty_block(self):
         cctx = zstd.ZstdCompressor(write_checksum=True)
         cobj = cctx.compressobj()
 
-        cobj.compress(b'foobar')
+        cobj.compress(b"foobar")
         cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
         # No-op if no block is active (this is internal to zstd).
-        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b'')
+        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"")
 
         trailing = cobj.flush()
         # 3 bytes block header + 4 bytes frame checksum
         self.assertEqual(len(trailing), 7)
         header = trailing[0:3]
-        self.assertEqual(header, b'\x01\x00\x00')
+        self.assertEqual(header, b"\x01\x00\x00")
 
     def test_multithreaded(self):
         source = io.BytesIO()
-        source.write(b'a' * 1048576)
-        source.write(b'b' * 1048576)
-        source.write(b'c' * 1048576)
+        source.write(b"a" * 1048576)
+        source.write(b"b" * 1048576)
+        source.write(b"c" * 1048576)
         source.seek(0)
 
         cctx = zstd.ZstdCompressor(level=1, threads=2)
@@ -378,9 +385,9 @@
 
         chunks.append(cobj.flush())
 
-        compressed = b''.join(chunks)
+        compressed = b"".join(chunks)
 
-        self.assertEqual(len(compressed), 295)
+        self.assertEqual(len(compressed), 119)
 
     def test_frame_progression(self):
         cctx = zstd.ZstdCompressor()
@@ -389,7 +396,7 @@
 
         cobj = cctx.compressobj()
 
-        cobj.compress(b'foobar')
+        cobj.compress(b"foobar")
         self.assertEqual(cctx.frame_progression(), (6, 0, 0))
 
         cobj.flush()
@@ -399,20 +406,20 @@
         cctx = zstd.ZstdCompressor()
 
         cobj = cctx.compressobj(size=2)
-        with self.assertRaisesRegexp(zstd.ZstdError, 'Src size is incorrect'):
-            cobj.compress(b'foo')
+        with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
+            cobj.compress(b"foo")
 
         # Try another operation on this instance.
-        with self.assertRaisesRegexp(zstd.ZstdError, 'Src size is incorrect'):
-            cobj.compress(b'aa')
+        with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
+            cobj.compress(b"aa")
 
         # Try another operation on the compressor.
         cctx.compressobj(size=4)
-        cctx.compress(b'foobar')
+        cctx.compress(b"foobar")
 
 
 @make_cffi
-class TestCompressor_copy_stream(unittest.TestCase):
+class TestCompressor_copy_stream(TestCase):
     def test_no_read(self):
         source = object()
         dest = io.BytesIO()
@@ -438,13 +445,12 @@
         self.assertEqual(int(r), 0)
         self.assertEqual(w, 9)
 
-        self.assertEqual(dest.getvalue(),
-                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        self.assertEqual(dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
 
     def test_large_data(self):
         source = io.BytesIO()
         for i in range(255):
-            source.write(struct.Struct('>B').pack(i) * 16384)
+            source.write(struct.Struct(">B").pack(i) * 16384)
         source.seek(0)
 
         dest = io.BytesIO()
@@ -461,7 +467,7 @@
         self.assertFalse(params.has_checksum)
 
     def test_write_checksum(self):
-        source = io.BytesIO(b'foobar')
+        source = io.BytesIO(b"foobar")
         no_checksum = io.BytesIO()
 
         cctx = zstd.ZstdCompressor(level=1)
@@ -472,8 +478,7 @@
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
         cctx.copy_stream(source, with_checksum)
 
-        self.assertEqual(len(with_checksum.getvalue()),
-                         len(no_checksum.getvalue()) + 4)
+        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
 
         no_params = zstd.get_frame_parameters(no_checksum.getvalue())
         with_params = zstd.get_frame_parameters(with_checksum.getvalue())
@@ -485,7 +490,7 @@
         self.assertTrue(with_params.has_checksum)
 
     def test_write_content_size(self):
-        source = io.BytesIO(b'foobar' * 256)
+        source = io.BytesIO(b"foobar" * 256)
         no_size = io.BytesIO()
 
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
@@ -497,16 +502,14 @@
         cctx.copy_stream(source, with_size)
 
         # Source content size is unknown, so no content size written.
-        self.assertEqual(len(with_size.getvalue()),
-                         len(no_size.getvalue()))
+        self.assertEqual(len(with_size.getvalue()), len(no_size.getvalue()))
 
         source.seek(0)
         with_size = io.BytesIO()
         cctx.copy_stream(source, with_size, size=len(source.getvalue()))
 
         # We specified source size, so content size header is present.
-        self.assertEqual(len(with_size.getvalue()),
-                         len(no_size.getvalue()) + 1)
+        self.assertEqual(len(with_size.getvalue()), len(no_size.getvalue()) + 1)
 
         no_params = zstd.get_frame_parameters(no_size.getvalue())
         with_params = zstd.get_frame_parameters(with_size.getvalue())
@@ -518,7 +521,7 @@
         self.assertFalse(with_params.has_checksum)
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(b'foobarfoobar')
+        source = OpCountingBytesIO(b"foobarfoobar")
         dest = OpCountingBytesIO()
         cctx = zstd.ZstdCompressor()
         r, w = cctx.copy_stream(source, dest, read_size=1, write_size=1)
@@ -530,16 +533,16 @@
 
     def test_multithreaded(self):
         source = io.BytesIO()
-        source.write(b'a' * 1048576)
-        source.write(b'b' * 1048576)
-        source.write(b'c' * 1048576)
+        source.write(b"a" * 1048576)
+        source.write(b"b" * 1048576)
+        source.write(b"c" * 1048576)
         source.seek(0)
 
         dest = io.BytesIO()
         cctx = zstd.ZstdCompressor(threads=2, write_content_size=False)
         r, w = cctx.copy_stream(source, dest)
         self.assertEqual(r, 3145728)
-        self.assertEqual(w, 295)
+        self.assertEqual(w, 111)
 
         params = zstd.get_frame_parameters(dest.getvalue())
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
@@ -559,15 +562,15 @@
 
     def test_bad_size(self):
         source = io.BytesIO()
-        source.write(b'a' * 32768)
-        source.write(b'b' * 32768)
+        source.write(b"a" * 32768)
+        source.write(b"b" * 32768)
         source.seek(0)
 
         dest = io.BytesIO()
 
         cctx = zstd.ZstdCompressor()
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'Src size is incorrect'):
+        with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
             cctx.copy_stream(source, dest, size=42)
 
         # Try another operation on this compressor.
@@ -577,31 +580,31 @@
 
 
 @make_cffi
-class TestCompressor_stream_reader(unittest.TestCase):
+class TestCompressor_stream_reader(TestCase):
     def test_context_manager(self):
         cctx = zstd.ZstdCompressor()
 
-        with cctx.stream_reader(b'foo') as reader:
-            with self.assertRaisesRegexp(ValueError, 'cannot __enter__ multiple times'):
+        with cctx.stream_reader(b"foo") as reader:
+            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
                 with reader as reader2:
                     pass
 
     def test_no_context_manager(self):
         cctx = zstd.ZstdCompressor()
 
-        reader = cctx.stream_reader(b'foo')
+        reader = cctx.stream_reader(b"foo")
         reader.read(4)
         self.assertFalse(reader.closed)
 
         reader.close()
         self.assertTrue(reader.closed)
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
             reader.read(1)
 
     def test_not_implemented(self):
         cctx = zstd.ZstdCompressor()
 
-        with cctx.stream_reader(b'foo' * 60) as reader:
+        with cctx.stream_reader(b"foo" * 60) as reader:
             with self.assertRaises(io.UnsupportedOperation):
                 reader.readline()
 
@@ -618,12 +621,12 @@
                 reader.writelines([])
 
             with self.assertRaises(OSError):
-                reader.write(b'foo')
+                reader.write(b"foo")
 
     def test_constant_methods(self):
         cctx = zstd.ZstdCompressor()
 
-        with cctx.stream_reader(b'boo') as reader:
+        with cctx.stream_reader(b"boo") as reader:
             self.assertTrue(reader.readable())
             self.assertFalse(reader.writable())
             self.assertFalse(reader.seekable())
@@ -637,27 +640,29 @@
     def test_read_closed(self):
         cctx = zstd.ZstdCompressor()
 
-        with cctx.stream_reader(b'foo' * 60) as reader:
+        with cctx.stream_reader(b"foo" * 60) as reader:
             reader.close()
             self.assertTrue(reader.closed)
-            with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+            with self.assertRaisesRegex(ValueError, "stream is closed"):
                 reader.read(10)
 
     def test_read_sizes(self):
         cctx = zstd.ZstdCompressor()
-        foo = cctx.compress(b'foo')
+        foo = cctx.compress(b"foo")
 
-        with cctx.stream_reader(b'foo') as reader:
-            with self.assertRaisesRegexp(ValueError, 'cannot read negative amounts less than -1'):
+        with cctx.stream_reader(b"foo") as reader:
+            with self.assertRaisesRegex(
+                ValueError, "cannot read negative amounts less than -1"
+            ):
                 reader.read(-2)
 
-            self.assertEqual(reader.read(0), b'')
+            self.assertEqual(reader.read(0), b"")
             self.assertEqual(reader.read(), foo)
 
     def test_read_buffer(self):
         cctx = zstd.ZstdCompressor()
 
-        source = b''.join([b'foo' * 60, b'bar' * 60, b'baz' * 60])
+        source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
         frame = cctx.compress(source)
 
         with cctx.stream_reader(source) as reader:
@@ -667,13 +672,13 @@
             result = reader.read(8192)
             self.assertEqual(result, frame)
             self.assertEqual(reader.tell(), len(result))
-            self.assertEqual(reader.read(), b'')
+            self.assertEqual(reader.read(), b"")
             self.assertEqual(reader.tell(), len(result))
 
     def test_read_buffer_small_chunks(self):
         cctx = zstd.ZstdCompressor()
 
-        source = b'foo' * 60
+        source = b"foo" * 60
         chunks = []
 
         with cctx.stream_reader(source) as reader:
@@ -687,12 +692,12 @@
                 chunks.append(chunk)
                 self.assertEqual(reader.tell(), sum(map(len, chunks)))
 
-        self.assertEqual(b''.join(chunks), cctx.compress(source))
+        self.assertEqual(b"".join(chunks), cctx.compress(source))
 
     def test_read_stream(self):
         cctx = zstd.ZstdCompressor()
 
-        source = b''.join([b'foo' * 60, b'bar' * 60, b'baz' * 60])
+        source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
         frame = cctx.compress(source)
 
         with cctx.stream_reader(io.BytesIO(source), size=len(source)) as reader:
@@ -701,13 +706,13 @@
             chunk = reader.read(8192)
             self.assertEqual(chunk, frame)
             self.assertEqual(reader.tell(), len(chunk))
-            self.assertEqual(reader.read(), b'')
+            self.assertEqual(reader.read(), b"")
             self.assertEqual(reader.tell(), len(chunk))
 
     def test_read_stream_small_chunks(self):
         cctx = zstd.ZstdCompressor()
 
-        source = b'foo' * 60
+        source = b"foo" * 60
         chunks = []
 
         with cctx.stream_reader(io.BytesIO(source), size=len(source)) as reader:
@@ -721,25 +726,25 @@
                 chunks.append(chunk)
                 self.assertEqual(reader.tell(), sum(map(len, chunks)))
 
-        self.assertEqual(b''.join(chunks), cctx.compress(source))
+        self.assertEqual(b"".join(chunks), cctx.compress(source))
 
     def test_read_after_exit(self):
         cctx = zstd.ZstdCompressor()
 
-        with cctx.stream_reader(b'foo' * 60) as reader:
+        with cctx.stream_reader(b"foo" * 60) as reader:
             while reader.read(8192):
                 pass
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
             reader.read(10)
 
     def test_bad_size(self):
         cctx = zstd.ZstdCompressor()
 
-        source = io.BytesIO(b'foobar')
+        source = io.BytesIO(b"foobar")
 
         with cctx.stream_reader(source, size=2) as reader:
-            with self.assertRaisesRegexp(zstd.ZstdError, 'Src size is incorrect'):
+            with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
                 reader.read(10)
 
         # Try another compression operation.
@@ -748,36 +753,36 @@
 
     def test_readall(self):
         cctx = zstd.ZstdCompressor()
-        frame = cctx.compress(b'foo' * 1024)
+        frame = cctx.compress(b"foo" * 1024)
 
-        reader = cctx.stream_reader(b'foo' * 1024)
+        reader = cctx.stream_reader(b"foo" * 1024)
         self.assertEqual(reader.readall(), frame)
 
     def test_readinto(self):
         cctx = zstd.ZstdCompressor()
-        foo = cctx.compress(b'foo')
+        foo = cctx.compress(b"foo")
 
-        reader = cctx.stream_reader(b'foo')
+        reader = cctx.stream_reader(b"foo")
         with self.assertRaises(Exception):
-            reader.readinto(b'foobar')
+            reader.readinto(b"foobar")
 
         # readinto() with sufficiently large destination.
         b = bytearray(1024)
-        reader = cctx.stream_reader(b'foo')
+        reader = cctx.stream_reader(b"foo")
         self.assertEqual(reader.readinto(b), len(foo))
-        self.assertEqual(b[0:len(foo)], foo)
+        self.assertEqual(b[0 : len(foo)], foo)
         self.assertEqual(reader.readinto(b), 0)
-        self.assertEqual(b[0:len(foo)], foo)
+        self.assertEqual(b[0 : len(foo)], foo)
 
         # readinto() with small reads.
         b = bytearray(1024)
-        reader = cctx.stream_reader(b'foo', read_size=1)
+        reader = cctx.stream_reader(b"foo", read_size=1)
         self.assertEqual(reader.readinto(b), len(foo))
-        self.assertEqual(b[0:len(foo)], foo)
+        self.assertEqual(b[0 : len(foo)], foo)
 
         # Too small destination buffer.
         b = bytearray(2)
-        reader = cctx.stream_reader(b'foo')
+        reader = cctx.stream_reader(b"foo")
         self.assertEqual(reader.readinto(b), 2)
         self.assertEqual(b[:], foo[0:2])
         self.assertEqual(reader.readinto(b), 2)
@@ -787,41 +792,41 @@
 
     def test_readinto1(self):
         cctx = zstd.ZstdCompressor()
-        foo = b''.join(cctx.read_to_iter(io.BytesIO(b'foo')))
+        foo = b"".join(cctx.read_to_iter(io.BytesIO(b"foo")))
 
-        reader = cctx.stream_reader(b'foo')
+        reader = cctx.stream_reader(b"foo")
         with self.assertRaises(Exception):
-            reader.readinto1(b'foobar')
+            reader.readinto1(b"foobar")
 
         b = bytearray(1024)
-        source = OpCountingBytesIO(b'foo')
+        source = OpCountingBytesIO(b"foo")
         reader = cctx.stream_reader(source)
         self.assertEqual(reader.readinto1(b), len(foo))
-        self.assertEqual(b[0:len(foo)], foo)
+        self.assertEqual(b[0 : len(foo)], foo)
         self.assertEqual(source._read_count, 2)
 
         # readinto1() with small reads.
         b = bytearray(1024)
-        source = OpCountingBytesIO(b'foo')
+        source = OpCountingBytesIO(b"foo")
         reader = cctx.stream_reader(source, read_size=1)
         self.assertEqual(reader.readinto1(b), len(foo))
-        self.assertEqual(b[0:len(foo)], foo)
+        self.assertEqual(b[0 : len(foo)], foo)
         self.assertEqual(source._read_count, 4)
 
     def test_read1(self):
         cctx = zstd.ZstdCompressor()
-        foo = b''.join(cctx.read_to_iter(io.BytesIO(b'foo')))
+        foo = b"".join(cctx.read_to_iter(io.BytesIO(b"foo")))
 
-        b = OpCountingBytesIO(b'foo')
+        b = OpCountingBytesIO(b"foo")
         reader = cctx.stream_reader(b)
 
         self.assertEqual(reader.read1(), foo)
         self.assertEqual(b._read_count, 2)
 
-        b = OpCountingBytesIO(b'foo')
+        b = OpCountingBytesIO(b"foo")
         reader = cctx.stream_reader(b)
 
-        self.assertEqual(reader.read1(0), b'')
+        self.assertEqual(reader.read1(0), b"")
         self.assertEqual(reader.read1(2), foo[0:2])
         self.assertEqual(b._read_count, 2)
         self.assertEqual(reader.read1(2), foo[2:4])
@@ -829,7 +834,7 @@
 
 
 @make_cffi
-class TestCompressor_stream_writer(unittest.TestCase):
+class TestCompressor_stream_writer(TestCase):
     def test_io_api(self):
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor()
@@ -899,7 +904,7 @@
         self.assertFalse(writer.closed)
 
     def test_fileno_file(self):
-        with tempfile.TemporaryFile('wb') as tf:
+        with tempfile.TemporaryFile("wb") as tf:
             cctx = zstd.ZstdCompressor()
             writer = cctx.stream_writer(tf)
 
@@ -910,33 +915,35 @@
         cctx = zstd.ZstdCompressor(level=1)
         writer = cctx.stream_writer(buffer)
 
-        writer.write(b'foo' * 1024)
+        writer.write(b"foo" * 1024)
         self.assertFalse(writer.closed)
         self.assertFalse(buffer.closed)
         writer.close()
         self.assertTrue(writer.closed)
         self.assertTrue(buffer.closed)
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
-            writer.write(b'foo')
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
+            writer.write(b"foo")
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
             writer.flush()
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
             with writer:
                 pass
 
-        self.assertEqual(buffer.getvalue(),
-                         b'\x28\xb5\x2f\xfd\x00\x48\x55\x00\x00\x18\x66\x6f'
-                         b'\x6f\x01\x00\xfa\xd3\x77\x43')
+        self.assertEqual(
+            buffer.getvalue(),
+            b"\x28\xb5\x2f\xfd\x00\x48\x55\x00\x00\x18\x66\x6f"
+            b"\x6f\x01\x00\xfa\xd3\x77\x43",
+        )
 
         # Context manager exit should close stream.
         buffer = io.BytesIO()
         writer = cctx.stream_writer(buffer)
 
         with writer:
-            writer.write(b'foo')
+            writer.write(b"foo")
 
         self.assertTrue(writer.closed)
 
@@ -944,10 +951,10 @@
         buffer = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
         with cctx.stream_writer(buffer) as compressor:
-            compressor.write(b'')
+            compressor.write(b"")
 
         result = buffer.getvalue()
-        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        self.assertEqual(result, b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
 
         params = zstd.get_frame_parameters(result)
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
@@ -958,11 +965,11 @@
         # Test without context manager.
         buffer = io.BytesIO()
         compressor = cctx.stream_writer(buffer)
-        self.assertEqual(compressor.write(b''), 0)
-        self.assertEqual(buffer.getvalue(), b'')
+        self.assertEqual(compressor.write(b""), 0)
+        self.assertEqual(buffer.getvalue(), b"")
         self.assertEqual(compressor.flush(zstd.FLUSH_FRAME), 9)
         result = buffer.getvalue()
-        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        self.assertEqual(result, b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
 
         params = zstd.get_frame_parameters(result)
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
@@ -972,18 +979,18 @@
 
         # Test write_return_read=True
         compressor = cctx.stream_writer(buffer, write_return_read=True)
-        self.assertEqual(compressor.write(b''), 0)
+        self.assertEqual(compressor.write(b""), 0)
 
     def test_input_types(self):
-        expected = b'\x28\xb5\x2f\xfd\x00\x48\x19\x00\x00\x66\x6f\x6f'
+        expected = b"\x28\xb5\x2f\xfd\x00\x48\x19\x00\x00\x66\x6f\x6f"
         cctx = zstd.ZstdCompressor(level=1)
 
         mutable_array = bytearray(3)
-        mutable_array[:] = b'foo'
+        mutable_array[:] = b"foo"
 
         sources = [
-            memoryview(b'foo'),
-            bytearray(b'foo'),
+            memoryview(b"foo"),
+            bytearray(b"foo"),
             mutable_array,
         ]
 
@@ -1001,51 +1008,55 @@
         buffer = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(level=5)
         with cctx.stream_writer(buffer) as compressor:
-            self.assertEqual(compressor.write(b'foo'), 0)
-            self.assertEqual(compressor.write(b'bar'), 0)
-            self.assertEqual(compressor.write(b'x' * 8192), 0)
+            self.assertEqual(compressor.write(b"foo"), 0)
+            self.assertEqual(compressor.write(b"bar"), 0)
+            self.assertEqual(compressor.write(b"x" * 8192), 0)
 
         result = buffer.getvalue()
-        self.assertEqual(result,
-                         b'\x28\xb5\x2f\xfd\x00\x58\x75\x00\x00\x38\x66\x6f'
-                         b'\x6f\x62\x61\x72\x78\x01\x00\xfc\xdf\x03\x23')
+        self.assertEqual(
+            result,
+            b"\x28\xb5\x2f\xfd\x00\x58\x75\x00\x00\x38\x66\x6f"
+            b"\x6f\x62\x61\x72\x78\x01\x00\xfc\xdf\x03\x23",
+        )
 
         # Test without context manager.
         buffer = io.BytesIO()
         compressor = cctx.stream_writer(buffer)
-        self.assertEqual(compressor.write(b'foo'), 0)
-        self.assertEqual(compressor.write(b'bar'), 0)
-        self.assertEqual(compressor.write(b'x' * 8192), 0)
+        self.assertEqual(compressor.write(b"foo"), 0)
+        self.assertEqual(compressor.write(b"bar"), 0)
+        self.assertEqual(compressor.write(b"x" * 8192), 0)
         self.assertEqual(compressor.flush(zstd.FLUSH_FRAME), 23)
         result = buffer.getvalue()
-        self.assertEqual(result,
-                         b'\x28\xb5\x2f\xfd\x00\x58\x75\x00\x00\x38\x66\x6f'
-                         b'\x6f\x62\x61\x72\x78\x01\x00\xfc\xdf\x03\x23')
+        self.assertEqual(
+            result,
+            b"\x28\xb5\x2f\xfd\x00\x58\x75\x00\x00\x38\x66\x6f"
+            b"\x6f\x62\x61\x72\x78\x01\x00\xfc\xdf\x03\x23",
+        )
 
         # Test with write_return_read=True.
         compressor = cctx.stream_writer(buffer, write_return_read=True)
-        self.assertEqual(compressor.write(b'foo'), 3)
-        self.assertEqual(compressor.write(b'barbiz'), 6)
-        self.assertEqual(compressor.write(b'x' * 8192), 8192)
+        self.assertEqual(compressor.write(b"foo"), 3)
+        self.assertEqual(compressor.write(b"barbiz"), 6)
+        self.assertEqual(compressor.write(b"x" * 8192), 8192)
 
     def test_dictionary(self):
         samples = []
         for i in range(128):
-            samples.append(b'foo' * 64)
-            samples.append(b'bar' * 64)
-            samples.append(b'foobar' * 64)
+            samples.append(b"foo" * 64)
+            samples.append(b"bar" * 64)
+            samples.append(b"foobar" * 64)
 
         d = zstd.train_dictionary(8192, samples)
 
         h = hashlib.sha1(d.as_bytes()).hexdigest()
-        self.assertEqual(h, '7a2e59a876db958f74257141045af8f912e00d4e')
+        self.assertEqual(h, "7a2e59a876db958f74257141045af8f912e00d4e")
 
         buffer = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(level=9, dict_data=d)
         with cctx.stream_writer(buffer) as compressor:
-            self.assertEqual(compressor.write(b'foo'), 0)
-            self.assertEqual(compressor.write(b'bar'), 0)
-            self.assertEqual(compressor.write(b'foo' * 16384), 0)
+            self.assertEqual(compressor.write(b"foo"), 0)
+            self.assertEqual(compressor.write(b"bar"), 0)
+            self.assertEqual(compressor.write(b"foo" * 16384), 0)
 
         compressed = buffer.getvalue()
 
@@ -1056,14 +1067,15 @@
         self.assertFalse(params.has_checksum)
 
         h = hashlib.sha1(compressed).hexdigest()
-        self.assertEqual(h, '0a7c05635061f58039727cdbe76388c6f4cfef06')
+        self.assertEqual(h, "0a7c05635061f58039727cdbe76388c6f4cfef06")
 
-        source = b'foo' + b'bar' + (b'foo' * 16384)
+        source = b"foo" + b"bar" + (b"foo" * 16384)
 
         dctx = zstd.ZstdDecompressor(dict_data=d)
 
-        self.assertEqual(dctx.decompress(compressed, max_output_size=len(source)),
-                         source)
+        self.assertEqual(
+            dctx.decompress(compressed, max_output_size=len(source)), source
+        )
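
For context, a sketch of the dictionary round trip this test drives, using
only APIs that appear in the hunks above:

    import zstandard as zstd

    # Train a dictionary on representative samples; frames produced with
    # dict_data must be decoded with the same dictionary.
    samples = [b"foo" * 64, b"bar" * 64, b"foobar" * 64] * 128
    d = zstd.train_dictionary(8192, samples)

    cctx = zstd.ZstdCompressor(level=9, dict_data=d)
    frame = cctx.compress(b"foobar" * 64)

    dctx = zstd.ZstdDecompressor(dict_data=d)
    assert dctx.decompress(frame) == b"foobar" * 64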
 
     def test_compression_params(self):
         params = zstd.ZstdCompressionParameters(
@@ -1073,14 +1085,15 @@
             min_match=5,
             search_log=4,
             target_length=10,
-            strategy=zstd.STRATEGY_FAST)
+            strategy=zstd.STRATEGY_FAST,
+        )
 
         buffer = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(compression_params=params)
         with cctx.stream_writer(buffer) as compressor:
-            self.assertEqual(compressor.write(b'foo'), 0)
-            self.assertEqual(compressor.write(b'bar'), 0)
-            self.assertEqual(compressor.write(b'foobar' * 16384), 0)
+            self.assertEqual(compressor.write(b"foo"), 0)
+            self.assertEqual(compressor.write(b"bar"), 0)
+            self.assertEqual(compressor.write(b"foobar" * 16384), 0)
 
         compressed = buffer.getvalue()
 
@@ -1091,18 +1104,18 @@
         self.assertFalse(params.has_checksum)
 
         h = hashlib.sha1(compressed).hexdigest()
-        self.assertEqual(h, 'dd4bb7d37c1a0235b38a2f6b462814376843ef0b')
+        self.assertEqual(h, "dd4bb7d37c1a0235b38a2f6b462814376843ef0b")
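
A sketch of supplying explicit compression parameters instead of a preset
level; the keyword arguments here are a subset of those used above, and
unset knobs keep zstd's defaults (illustration only):

    import zstandard as zstd

    params = zstd.ZstdCompressionParameters(
        window_log=20,
        strategy=zstd.STRATEGY_FAST,
    )
    cctx = zstd.ZstdCompressor(compression_params=params)
    frame = cctx.compress(b"data")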
 
     def test_write_checksum(self):
         no_checksum = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(level=1)
         with cctx.stream_writer(no_checksum) as compressor:
-            self.assertEqual(compressor.write(b'foobar'), 0)
+            self.assertEqual(compressor.write(b"foobar"), 0)
 
         with_checksum = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
         with cctx.stream_writer(with_checksum) as compressor:
-            self.assertEqual(compressor.write(b'foobar'), 0)
+            self.assertEqual(compressor.write(b"foobar"), 0)
 
         no_params = zstd.get_frame_parameters(no_checksum.getvalue())
         with_params = zstd.get_frame_parameters(with_checksum.getvalue())
@@ -1113,29 +1126,27 @@
         self.assertFalse(no_params.has_checksum)
         self.assertTrue(with_params.has_checksum)
 
-        self.assertEqual(len(with_checksum.getvalue()),
-                         len(no_checksum.getvalue()) + 4)
+        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
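
The four-byte delta asserted above is the optional frame checksum trailer.
A sketch with the one-shot API, assuming one-shot frames gain the same
trailer:

    import zstandard as zstd

    plain = zstd.ZstdCompressor(level=1).compress(b"foobar")
    checked = zstd.ZstdCompressor(level=1, write_checksum=True).compress(b"foobar")

    # The checksum adds exactly four bytes and is advertised in the header.
    assert len(checked) == len(plain) + 4
    assert zstd.get_frame_parameters(checked).has_checksum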
 
     def test_write_content_size(self):
         no_size = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
         with cctx.stream_writer(no_size) as compressor:
-            self.assertEqual(compressor.write(b'foobar' * 256), 0)
+            self.assertEqual(compressor.write(b"foobar" * 256), 0)
 
         with_size = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(level=1)
         with cctx.stream_writer(with_size) as compressor:
-            self.assertEqual(compressor.write(b'foobar' * 256), 0)
+            self.assertEqual(compressor.write(b"foobar" * 256), 0)
 
         # The source size is not known in streaming mode, so the content
         # size header is not written.
-        self.assertEqual(len(with_size.getvalue()),
-                         len(no_size.getvalue()))
+        self.assertEqual(len(with_size.getvalue()), len(no_size.getvalue()))
 
         # Declaring size will write the header.
         with_size = NonClosingBytesIO()
-        with cctx.stream_writer(with_size, size=len(b'foobar' * 256)) as compressor:
-            self.assertEqual(compressor.write(b'foobar' * 256), 0)
+        with cctx.stream_writer(with_size, size=len(b"foobar" * 256)) as compressor:
+            self.assertEqual(compressor.write(b"foobar" * 256), 0)
 
         no_params = zstd.get_frame_parameters(no_size.getvalue())
         with_params = zstd.get_frame_parameters(with_size.getvalue())
@@ -1146,31 +1157,30 @@
         self.assertFalse(no_params.has_checksum)
         self.assertFalse(with_params.has_checksum)
 
-        self.assertEqual(len(with_size.getvalue()),
-                         len(no_size.getvalue()) + 1)
+        self.assertEqual(len(with_size.getvalue()), len(no_size.getvalue()) + 1)
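
A sketch of declaring the input size up front so the content size header is
emitted, mirroring the assertions above (the content_size check is assumed
from the frame parameter APIs used elsewhere in this file):

    import io
    import zstandard as zstd

    data = b"foobar" * 256
    cctx = zstd.ZstdCompressor(level=1)

    dest = io.BytesIO()
    compressor = cctx.stream_writer(dest, size=len(data))
    compressor.write(data)
    compressor.flush(zstd.FLUSH_FRAME)

    # With size= declared, the frame header records the content size.
    params = zstd.get_frame_parameters(dest.getvalue())
    assert params.content_size == len(data)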
 
     def test_no_dict_id(self):
         samples = []
         for i in range(128):
-            samples.append(b'foo' * 64)
-            samples.append(b'bar' * 64)
-            samples.append(b'foobar' * 64)
+            samples.append(b"foo" * 64)
+            samples.append(b"bar" * 64)
+            samples.append(b"foobar" * 64)
 
         d = zstd.train_dictionary(1024, samples)
 
         with_dict_id = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(level=1, dict_data=d)
         with cctx.stream_writer(with_dict_id) as compressor:
-            self.assertEqual(compressor.write(b'foobarfoobar'), 0)
+            self.assertEqual(compressor.write(b"foobarfoobar"), 0)
 
-        self.assertEqual(with_dict_id.getvalue()[4:5], b'\x03')
+        self.assertEqual(with_dict_id.getvalue()[4:5], b"\x03")
 
         cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
         no_dict_id = NonClosingBytesIO()
         with cctx.stream_writer(no_dict_id) as compressor:
-            self.assertEqual(compressor.write(b'foobarfoobar'), 0)
+            self.assertEqual(compressor.write(b"foobarfoobar"), 0)
 
-        self.assertEqual(no_dict_id.getvalue()[4:5], b'\x00')
+        self.assertEqual(no_dict_id.getvalue()[4:5], b"\x00")
 
         no_params = zstd.get_frame_parameters(no_dict_id.getvalue())
         with_params = zstd.get_frame_parameters(with_dict_id.getvalue())
@@ -1181,14 +1191,13 @@
         self.assertFalse(no_params.has_checksum)
         self.assertFalse(with_params.has_checksum)
 
-        self.assertEqual(len(with_dict_id.getvalue()),
-                         len(no_dict_id.getvalue()) + 4)
+        self.assertEqual(len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4)
 
     def test_memory_size(self):
         cctx = zstd.ZstdCompressor(level=3)
         buffer = io.BytesIO()
         with cctx.stream_writer(buffer) as compressor:
-            compressor.write(b'foo')
+            compressor.write(b"foo")
             size = compressor.memory_size()
 
         self.assertGreater(size, 100000)
@@ -1197,9 +1206,9 @@
         cctx = zstd.ZstdCompressor(level=3)
         dest = OpCountingBytesIO()
         with cctx.stream_writer(dest, write_size=1) as compressor:
-            self.assertEqual(compressor.write(b'foo'), 0)
-            self.assertEqual(compressor.write(b'bar'), 0)
-            self.assertEqual(compressor.write(b'foobar'), 0)
+            self.assertEqual(compressor.write(b"foo"), 0)
+            self.assertEqual(compressor.write(b"bar"), 0)
+            self.assertEqual(compressor.write(b"foobar"), 0)
 
         self.assertEqual(len(dest.getvalue()), dest._write_count)
 
@@ -1207,15 +1216,15 @@
         cctx = zstd.ZstdCompressor(level=3)
         dest = OpCountingBytesIO()
         with cctx.stream_writer(dest) as compressor:
-            self.assertEqual(compressor.write(b'foo'), 0)
+            self.assertEqual(compressor.write(b"foo"), 0)
             self.assertEqual(dest._write_count, 0)
             self.assertEqual(compressor.flush(), 12)
             self.assertEqual(dest._write_count, 1)
-            self.assertEqual(compressor.write(b'bar'), 0)
+            self.assertEqual(compressor.write(b"bar"), 0)
             self.assertEqual(dest._write_count, 1)
             self.assertEqual(compressor.flush(), 6)
             self.assertEqual(dest._write_count, 2)
-            self.assertEqual(compressor.write(b'baz'), 0)
+            self.assertEqual(compressor.write(b"baz"), 0)
 
         self.assertEqual(dest._write_count, 3)
 
@@ -1223,7 +1232,7 @@
         cctx = zstd.ZstdCompressor(level=3, write_checksum=True)
         dest = OpCountingBytesIO()
         with cctx.stream_writer(dest) as compressor:
-            self.assertEqual(compressor.write(b'foobar' * 8192), 0)
+            self.assertEqual(compressor.write(b"foobar" * 8192), 0)
             count = dest._write_count
             offset = dest.tell()
             self.assertEqual(compressor.flush(), 23)
@@ -1238,41 +1247,43 @@
         self.assertEqual(len(trailing), 7)
 
         header = trailing[0:3]
-        self.assertEqual(header, b'\x01\x00\x00')
+        self.assertEqual(header, b"\x01\x00\x00")
 
     def test_flush_frame(self):
         cctx = zstd.ZstdCompressor(level=3)
         dest = OpCountingBytesIO()
 
         with cctx.stream_writer(dest) as compressor:
-            self.assertEqual(compressor.write(b'foobar' * 8192), 0)
+            self.assertEqual(compressor.write(b"foobar" * 8192), 0)
             self.assertEqual(compressor.flush(zstd.FLUSH_FRAME), 23)
-            compressor.write(b'biz' * 16384)
+            compressor.write(b"biz" * 16384)
 
-        self.assertEqual(dest.getvalue(),
-                         # Frame 1.
-                         b'\x28\xb5\x2f\xfd\x00\x58\x75\x00\x00\x30\x66\x6f\x6f'
-                         b'\x62\x61\x72\x01\x00\xf7\xbf\xe8\xa5\x08'
-                         # Frame 2.
-                         b'\x28\xb5\x2f\xfd\x00\x58\x5d\x00\x00\x18\x62\x69\x7a'
-                         b'\x01\x00\xfa\x3f\x75\x37\x04')
+        self.assertEqual(
+            dest.getvalue(),
+            # Frame 1.
+            b"\x28\xb5\x2f\xfd\x00\x58\x75\x00\x00\x30\x66\x6f\x6f"
+            b"\x62\x61\x72\x01\x00\xf7\xbf\xe8\xa5\x08"
+            # Frame 2.
+            b"\x28\xb5\x2f\xfd\x00\x58\x5d\x00\x00\x18\x62\x69\x7a"
+            b"\x01\x00\xfa\x3f\x75\x37\x04",
+        )
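
flush(zstd.FLUSH_FRAME) ends the current frame, so one writer can emit
several independently decodable frames into a single stream; a sketch:

    import io
    import zstandard as zstd

    cctx = zstd.ZstdCompressor()
    dest = io.BytesIO()
    compressor = cctx.stream_writer(dest)

    compressor.write(b"first")
    compressor.flush(zstd.FLUSH_FRAME)   # closes frame 1
    compressor.write(b"second")
    compressor.flush(zstd.FLUSH_FRAME)   # closes frame 2

    # dest now holds two complete zstd frames back to back.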
 
     def test_bad_flush_mode(self):
         cctx = zstd.ZstdCompressor()
         dest = io.BytesIO()
         with cctx.stream_writer(dest) as compressor:
-            with self.assertRaisesRegexp(ValueError, 'unknown flush_mode: 42'):
+            with self.assertRaisesRegex(ValueError, "unknown flush_mode: 42"):
                 compressor.flush(flush_mode=42)
 
     def test_multithreaded(self):
         dest = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(threads=2)
         with cctx.stream_writer(dest) as compressor:
-            compressor.write(b'a' * 1048576)
-            compressor.write(b'b' * 1048576)
-            compressor.write(b'c' * 1048576)
+            compressor.write(b"a" * 1048576)
+            compressor.write(b"b" * 1048576)
+            compressor.write(b"c" * 1048576)
 
-        self.assertEqual(len(dest.getvalue()), 295)
+        self.assertEqual(len(dest.getvalue()), 111)
 
     def test_tell(self):
         dest = io.BytesIO()
@@ -1281,7 +1292,7 @@
             self.assertEqual(compressor.tell(), 0)
 
             for i in range(256):
-                compressor.write(b'foo' * (i + 1))
+                compressor.write(b"foo" * (i + 1))
                 self.assertEqual(compressor.tell(), dest.tell())
 
     def test_bad_size(self):
@@ -1289,9 +1300,9 @@
 
         dest = io.BytesIO()
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'Src size is incorrect'):
+        with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
             with cctx.stream_writer(dest, size=2) as compressor:
-                compressor.write(b'foo')
+                compressor.write(b"foo")
 
         # Test another operation.
         with cctx.stream_writer(dest, size=42):
@@ -1301,20 +1312,20 @@
         dest = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor()
         with cctx.stream_writer(dest) as compressor:
-            with tarfile.open('tf', mode='w|', fileobj=compressor) as tf:
-                tf.add(__file__, 'test_compressor.py')
+            with tarfile.open("tf", mode="w|", fileobj=compressor) as tf:
+                tf.add(__file__, "test_compressor.py")
 
         dest = io.BytesIO(dest.getvalue())
 
         dctx = zstd.ZstdDecompressor()
         with dctx.stream_reader(dest) as reader:
-            with tarfile.open(mode='r|', fileobj=reader) as tf:
+            with tarfile.open(mode="r|", fileobj=reader) as tf:
                 for member in tf:
-                    self.assertEqual(member.name, 'test_compressor.py')
+                    self.assertEqual(member.name, "test_compressor.py")
 
 
 @make_cffi
-class TestCompressor_read_to_iter(unittest.TestCase):
+class TestCompressor_read_to_iter(TestCase):
     def test_type_validation(self):
         cctx = zstd.ZstdCompressor()
 
@@ -1323,10 +1334,10 @@
             pass
 
         # Buffer protocol works.
-        for chunk in cctx.read_to_iter(b'foobar'):
+        for chunk in cctx.read_to_iter(b"foobar"):
             pass
 
-        with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'):
+        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
             for chunk in cctx.read_to_iter(True):
                 pass
 
@@ -1337,22 +1348,22 @@
         it = cctx.read_to_iter(source)
         chunks = list(it)
         self.assertEqual(len(chunks), 1)
-        compressed = b''.join(chunks)
-        self.assertEqual(compressed, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        compressed = b"".join(chunks)
+        self.assertEqual(compressed, b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
 
         # And again with the buffer protocol.
-        it = cctx.read_to_iter(b'')
+        it = cctx.read_to_iter(b"")
         chunks = list(it)
         self.assertEqual(len(chunks), 1)
-        compressed2 = b''.join(chunks)
+        compressed2 = b"".join(chunks)
         self.assertEqual(compressed2, compressed)
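
A sketch of typical read_to_iter() usage; as the next test verifies, no
compression work happens until the iterator is actually consumed:

    import io
    import zstandard as zstd

    cctx = zstd.ZstdCompressor()
    source = io.BytesIO(b"data to compress")

    # Accepts any object with a read() method, or a buffer, and lazily
    # yields compressed chunks.
    frame = b"".join(cctx.read_to_iter(source))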
 
     def test_read_large(self):
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
 
         source = io.BytesIO()
-        source.write(b'f' * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE)
-        source.write(b'o')
+        source.write(b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE)
+        source.write(b"o")
         source.seek(0)
 
         # Creating an iterator should not perform any compression until
@@ -1380,9 +1391,9 @@
             next(it)
 
         # We should get the same output as the one-shot compression mechanism.
-        self.assertEqual(b''.join(chunks), cctx.compress(source.getvalue()))
+        self.assertEqual(b"".join(chunks), cctx.compress(source.getvalue()))
 
-        params = zstd.get_frame_parameters(b''.join(chunks))
+        params = zstd.get_frame_parameters(b"".join(chunks))
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
         self.assertEqual(params.window_size, 262144)
         self.assertEqual(params.dict_id, 0)
@@ -1393,16 +1404,16 @@
         chunks = list(it)
         self.assertEqual(len(chunks), 2)
 
-        params = zstd.get_frame_parameters(b''.join(chunks))
+        params = zstd.get_frame_parameters(b"".join(chunks))
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
-        #self.assertEqual(params.window_size, 262144)
+        # self.assertEqual(params.window_size, 262144)
         self.assertEqual(params.dict_id, 0)
         self.assertFalse(params.has_checksum)
 
-        self.assertEqual(b''.join(chunks), cctx.compress(source.getvalue()))
+        self.assertEqual(b"".join(chunks), cctx.compress(source.getvalue()))
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(b'foobarfoobar')
+        source = OpCountingBytesIO(b"foobarfoobar")
         cctx = zstd.ZstdCompressor(level=3)
         for chunk in cctx.read_to_iter(source, read_size=1, write_size=1):
             self.assertEqual(len(chunk), 1)
@@ -1411,42 +1422,42 @@
 
     def test_multithreaded(self):
         source = io.BytesIO()
-        source.write(b'a' * 1048576)
-        source.write(b'b' * 1048576)
-        source.write(b'c' * 1048576)
+        source.write(b"a" * 1048576)
+        source.write(b"b" * 1048576)
+        source.write(b"c" * 1048576)
         source.seek(0)
 
         cctx = zstd.ZstdCompressor(threads=2)
 
-        compressed = b''.join(cctx.read_to_iter(source))
-        self.assertEqual(len(compressed), 295)
+        compressed = b"".join(cctx.read_to_iter(source))
+        self.assertEqual(len(compressed), 111)
 
     def test_bad_size(self):
         cctx = zstd.ZstdCompressor()
 
-        source = io.BytesIO(b'a' * 42)
+        source = io.BytesIO(b"a" * 42)
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'Src size is incorrect'):
-            b''.join(cctx.read_to_iter(source, size=2))
+        with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
+            b"".join(cctx.read_to_iter(source, size=2))
 
         # Test another operation on the compressor after the error.
-        b''.join(cctx.read_to_iter(source))
+        b"".join(cctx.read_to_iter(source))
 
 
 @make_cffi
-class TestCompressor_chunker(unittest.TestCase):
+class TestCompressor_chunker(TestCase):
     def test_empty(self):
         cctx = zstd.ZstdCompressor(write_content_size=False)
         chunker = cctx.chunker()
 
-        it = chunker.compress(b'')
+        it = chunker.compress(b"")
 
         with self.assertRaises(StopIteration):
             next(it)
 
         it = chunker.finish()
 
-        self.assertEqual(next(it), b'\x28\xb5\x2f\xfd\x00\x58\x01\x00\x00')
+        self.assertEqual(next(it), b"\x28\xb5\x2f\xfd\x00\x58\x01\x00\x00")
 
         with self.assertRaises(StopIteration):
             next(it)
@@ -1455,21 +1466,23 @@
         cctx = zstd.ZstdCompressor()
         chunker = cctx.chunker()
 
-        it = chunker.compress(b'foobar')
+        it = chunker.compress(b"foobar")
 
         with self.assertRaises(StopIteration):
             next(it)
 
-        it = chunker.compress(b'baz' * 30)
+        it = chunker.compress(b"baz" * 30)
 
         with self.assertRaises(StopIteration):
             next(it)
 
         it = chunker.finish()
 
-        self.assertEqual(next(it),
-                         b'\x28\xb5\x2f\xfd\x00\x58\x7d\x00\x00\x48\x66\x6f'
-                         b'\x6f\x62\x61\x72\x62\x61\x7a\x01\x00\xe4\xe4\x8e')
+        self.assertEqual(
+            next(it),
+            b"\x28\xb5\x2f\xfd\x00\x58\x7d\x00\x00\x48\x66\x6f"
+            b"\x6f\x62\x61\x72\x62\x61\x7a\x01\x00\xe4\xe4\x8e",
+        )
 
         with self.assertRaises(StopIteration):
             next(it)
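
A sketch of the chunker API these tests drive: compress() yields only
completed fixed-size chunks, and finish() drains the remainder and ends
the frame:

    import zstandard as zstd

    cctx = zstd.ZstdCompressor()
    chunker = cctx.chunker(chunk_size=32768)

    out = []
    for piece in (b"foo" * 1024, b"bar" * 1024):
        out.extend(chunker.compress(piece))  # only completed chunks
    out.extend(chunker.finish())             # remainder + end of frame

    frame = b"".join(out)
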
@@ -1478,57 +1491,60 @@
         cctx = zstd.ZstdCompressor()
         chunker = cctx.chunker(size=1024)
 
-        it = chunker.compress(b'x' * 1000)
+        it = chunker.compress(b"x" * 1000)
 
         with self.assertRaises(StopIteration):
             next(it)
 
-        it = chunker.compress(b'y' * 24)
+        it = chunker.compress(b"y" * 24)
 
         with self.assertRaises(StopIteration):
             next(it)
 
         chunks = list(chunker.finish())
 
-        self.assertEqual(chunks, [
-            b'\x28\xb5\x2f\xfd\x60\x00\x03\x65\x00\x00\x18\x78\x78\x79\x02\x00'
-            b'\xa0\x16\xe3\x2b\x80\x05'
-        ])
+        self.assertEqual(
+            chunks,
+            [
+                b"\x28\xb5\x2f\xfd\x60\x00\x03\x65\x00\x00\x18\x78\x78\x79\x02\x00"
+                b"\xa0\x16\xe3\x2b\x80\x05"
+            ],
+        )
 
         dctx = zstd.ZstdDecompressor()
 
-        self.assertEqual(dctx.decompress(b''.join(chunks)),
-                         (b'x' * 1000) + (b'y' * 24))
+        self.assertEqual(dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24))
 
     def test_small_chunk_size(self):
         cctx = zstd.ZstdCompressor()
         chunker = cctx.chunker(chunk_size=1)
 
-        chunks = list(chunker.compress(b'foo' * 1024))
+        chunks = list(chunker.compress(b"foo" * 1024))
         self.assertEqual(chunks, [])
 
         chunks = list(chunker.finish())
         self.assertTrue(all(len(chunk) == 1 for chunk in chunks))
 
         self.assertEqual(
-            b''.join(chunks),
-            b'\x28\xb5\x2f\xfd\x00\x58\x55\x00\x00\x18\x66\x6f\x6f\x01\x00'
-            b'\xfa\xd3\x77\x43')
+            b"".join(chunks),
+            b"\x28\xb5\x2f\xfd\x00\x58\x55\x00\x00\x18\x66\x6f\x6f\x01\x00"
+            b"\xfa\xd3\x77\x43",
+        )
 
         dctx = zstd.ZstdDecompressor()
-        self.assertEqual(dctx.decompress(b''.join(chunks),
-                                         max_output_size=10000),
-                         b'foo' * 1024)
+        self.assertEqual(
+            dctx.decompress(b"".join(chunks), max_output_size=10000), b"foo" * 1024
+        )
 
     def test_input_types(self):
         cctx = zstd.ZstdCompressor()
 
         mutable_array = bytearray(3)
-        mutable_array[:] = b'foo'
+        mutable_array[:] = b"foo"
 
         sources = [
-            memoryview(b'foo'),
-            bytearray(b'foo'),
+            memoryview(b"foo"),
+            bytearray(b"foo"),
             mutable_array,
         ]
 
@@ -1536,28 +1552,32 @@
             chunker = cctx.chunker()
 
             self.assertEqual(list(chunker.compress(source)), [])
-            self.assertEqual(list(chunker.finish()), [
-                b'\x28\xb5\x2f\xfd\x00\x58\x19\x00\x00\x66\x6f\x6f'
-            ])
+            self.assertEqual(
+                list(chunker.finish()),
+                [b"\x28\xb5\x2f\xfd\x00\x58\x19\x00\x00\x66\x6f\x6f"],
+            )
 
     def test_flush(self):
         cctx = zstd.ZstdCompressor()
         chunker = cctx.chunker()
 
-        self.assertEqual(list(chunker.compress(b'foo' * 1024)), [])
-        self.assertEqual(list(chunker.compress(b'bar' * 1024)), [])
+        self.assertEqual(list(chunker.compress(b"foo" * 1024)), [])
+        self.assertEqual(list(chunker.compress(b"bar" * 1024)), [])
 
         chunks1 = list(chunker.flush())
 
-        self.assertEqual(chunks1, [
-            b'\x28\xb5\x2f\xfd\x00\x58\x8c\x00\x00\x30\x66\x6f\x6f\x62\x61\x72'
-            b'\x02\x00\xfa\x03\xfe\xd0\x9f\xbe\x1b\x02'
-        ])
+        self.assertEqual(
+            chunks1,
+            [
+                b"\x28\xb5\x2f\xfd\x00\x58\x8c\x00\x00\x30\x66\x6f\x6f\x62\x61\x72"
+                b"\x02\x00\xfa\x03\xfe\xd0\x9f\xbe\x1b\x02"
+            ],
+        )
 
         self.assertEqual(list(chunker.flush()), [])
         self.assertEqual(list(chunker.flush()), [])
 
-        self.assertEqual(list(chunker.compress(b'baz' * 1024)), [])
+        self.assertEqual(list(chunker.compress(b"baz" * 1024)), [])
 
         chunks2 = list(chunker.flush())
         self.assertEqual(len(chunks2), 1)
@@ -1567,53 +1587,56 @@
 
         dctx = zstd.ZstdDecompressor()
 
-        self.assertEqual(dctx.decompress(b''.join(chunks1 + chunks2 + chunks3),
-                                         max_output_size=10000),
-                         (b'foo' * 1024) + (b'bar' * 1024) + (b'baz' * 1024))
+        self.assertEqual(
+            dctx.decompress(
+                b"".join(chunks1 + chunks2 + chunks3), max_output_size=10000
+            ),
+            (b"foo" * 1024) + (b"bar" * 1024) + (b"baz" * 1024),
+        )
 
     def test_compress_after_finish(self):
         cctx = zstd.ZstdCompressor()
         chunker = cctx.chunker()
 
-        list(chunker.compress(b'foo'))
+        list(chunker.compress(b"foo"))
         list(chunker.finish())
 
-        with self.assertRaisesRegexp(
-                zstd.ZstdError,
-                r'cannot call compress\(\) after compression finished'):
-            list(chunker.compress(b'foo'))
+        with self.assertRaisesRegex(
+            zstd.ZstdError, r"cannot call compress\(\) after compression finished"
+        ):
+            list(chunker.compress(b"foo"))
 
     def test_flush_after_finish(self):
         cctx = zstd.ZstdCompressor()
         chunker = cctx.chunker()
 
-        list(chunker.compress(b'foo'))
+        list(chunker.compress(b"foo"))
         list(chunker.finish())
 
-        with self.assertRaisesRegexp(
-                zstd.ZstdError,
-                r'cannot call flush\(\) after compression finished'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, r"cannot call flush\(\) after compression finished"
+        ):
             list(chunker.flush())
 
     def test_finish_after_finish(self):
         cctx = zstd.ZstdCompressor()
         chunker = cctx.chunker()
 
-        list(chunker.compress(b'foo'))
+        list(chunker.compress(b"foo"))
         list(chunker.finish())
 
-        with self.assertRaisesRegexp(
-                zstd.ZstdError,
-                r'cannot call finish\(\) after compression finished'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, r"cannot call finish\(\) after compression finished"
+        ):
             list(chunker.finish())
 
 
-class TestCompressor_multi_compress_to_buffer(unittest.TestCase):
+class TestCompressor_multi_compress_to_buffer(TestCase):
     def test_invalid_inputs(self):
         cctx = zstd.ZstdCompressor()
 
-        if not hasattr(cctx, 'multi_compress_to_buffer'):
-            self.skipTest('multi_compress_to_buffer not available')
+        if not hasattr(cctx, "multi_compress_to_buffer"):
+            self.skipTest("multi_compress_to_buffer not available")
 
         with self.assertRaises(TypeError):
             cctx.multi_compress_to_buffer(True)
@@ -1621,28 +1644,28 @@
         with self.assertRaises(TypeError):
             cctx.multi_compress_to_buffer((1, 2))
 
-        with self.assertRaisesRegexp(TypeError, 'item 0 not a bytes like object'):
-            cctx.multi_compress_to_buffer([u'foo'])
+        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+            cctx.multi_compress_to_buffer([u"foo"])
 
     def test_empty_input(self):
         cctx = zstd.ZstdCompressor()
 
-        if not hasattr(cctx, 'multi_compress_to_buffer'):
-            self.skipTest('multi_compress_to_buffer not available')
+        if not hasattr(cctx, "multi_compress_to_buffer"):
+            self.skipTest("multi_compress_to_buffer not available")
 
-        with self.assertRaisesRegexp(ValueError, 'no source elements found'):
+        with self.assertRaisesRegex(ValueError, "no source elements found"):
             cctx.multi_compress_to_buffer([])
 
-        with self.assertRaisesRegexp(ValueError, 'source elements are empty'):
-            cctx.multi_compress_to_buffer([b'', b'', b''])
+        with self.assertRaisesRegex(ValueError, "source elements are empty"):
+            cctx.multi_compress_to_buffer([b"", b"", b""])
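
A sketch of the happy path; multi_compress_to_buffer is only present on
builds exposing the buffer APIs (hence the hasattr guards), and the
len()/indexing behavior of the returned collection is assumed from the
assertions in these tests:

    import zstandard as zstd

    cctx = zstd.ZstdCompressor()

    # Compress several inputs in one call, optionally across threads; the
    # result holds one compressed frame per input.
    result = cctx.multi_compress_to_buffer([b"foo" * 12, b"bar" * 6], threads=2)
    assert len(result) == 2
    first_frame = result[0].tobytes()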
 
     def test_list_input(self):
         cctx = zstd.ZstdCompressor(write_checksum=True)
 
-        if not hasattr(cctx, 'multi_compress_to_buffer'):
-            self.skipTest('multi_compress_to_buffer not available')
+        if not hasattr(cctx, "multi_compress_to_buffer"):
+            self.skipTest("multi_compress_to_buffer not available")
 
-        original = [b'foo' * 12, b'bar' * 6]
+        original = [b"foo" * 12, b"bar" * 6]
         frames = [cctx.compress(c) for c in original]
         b = cctx.multi_compress_to_buffer(original)
 
@@ -1657,15 +1680,16 @@
     def test_buffer_with_segments_input(self):
         cctx = zstd.ZstdCompressor(write_checksum=True)
 
-        if not hasattr(cctx, 'multi_compress_to_buffer'):
-            self.skipTest('multi_compress_to_buffer not available')
+        if not hasattr(cctx, "multi_compress_to_buffer"):
+            self.skipTest("multi_compress_to_buffer not available")
 
-        original = [b'foo' * 4, b'bar' * 6]
+        original = [b"foo" * 4, b"bar" * 6]
         frames = [cctx.compress(c) for c in original]
 
-        offsets = struct.pack('=QQQQ', 0, len(original[0]),
-                                       len(original[0]), len(original[1]))
-        segments = zstd.BufferWithSegments(b''.join(original), offsets)
+        offsets = struct.pack(
+            "=QQQQ", 0, len(original[0]), len(original[0]), len(original[1])
+        )
+        segments = zstd.BufferWithSegments(b"".join(original), offsets)
 
         result = cctx.multi_compress_to_buffer(segments)
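
The offsets argument packs native-order unsigned 64-bit (offset, length)
pairs, one pair per segment of the backing buffer; a sketch:

    import struct
    import zstandard as zstd

    data = b"foobar"
    offsets = struct.pack("=QQQQ", 0, 3, 3, 3)  # "foo" and "bar"
    segments = zstd.BufferWithSegments(data, offsets)
    assert len(segments) == 2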
 
@@ -1678,28 +1702,39 @@
     def test_buffer_with_segments_collection_input(self):
         cctx = zstd.ZstdCompressor(write_checksum=True)
 
-        if not hasattr(cctx, 'multi_compress_to_buffer'):
-            self.skipTest('multi_compress_to_buffer not available')
+        if not hasattr(cctx, "multi_compress_to_buffer"):
+            self.skipTest("multi_compress_to_buffer not available")
 
         original = [
-            b'foo1',
-            b'foo2' * 2,
-            b'foo3' * 3,
-            b'foo4' * 4,
-            b'foo5' * 5,
+            b"foo1",
+            b"foo2" * 2,
+            b"foo3" * 3,
+            b"foo4" * 4,
+            b"foo5" * 5,
         ]
 
         frames = [cctx.compress(c) for c in original]
 
-        b = b''.join([original[0], original[1]])
-        b1 = zstd.BufferWithSegments(b, struct.pack('=QQQQ',
-                                                    0, len(original[0]),
-                                                    len(original[0]), len(original[1])))
-        b = b''.join([original[2], original[3], original[4]])
-        b2 = zstd.BufferWithSegments(b, struct.pack('=QQQQQQ',
-                                                    0, len(original[2]),
-                                                    len(original[2]), len(original[3]),
-                                                    len(original[2]) + len(original[3]), len(original[4])))
+        b = b"".join([original[0], original[1]])
+        b1 = zstd.BufferWithSegments(
+            b,
+            struct.pack(
+                "=QQQQ", 0, len(original[0]), len(original[0]), len(original[1])
+            ),
+        )
+        b = b"".join([original[2], original[3], original[4]])
+        b2 = zstd.BufferWithSegments(
+            b,
+            struct.pack(
+                "=QQQQQQ",
+                0,
+                len(original[2]),
+                len(original[2]),
+                len(original[3]),
+                len(original[2]) + len(original[3]),
+                len(original[4]),
+            ),
+        )
 
         c = zstd.BufferWithSegmentsCollection(b1, b2)
 
@@ -1714,16 +1749,16 @@
         # threads argument will cause multi-threaded ZSTD APIs to be used, which will
         # make output different.
         refcctx = zstd.ZstdCompressor(write_checksum=True)
-        reference = [refcctx.compress(b'x' * 64), refcctx.compress(b'y' * 64)]
+        reference = [refcctx.compress(b"x" * 64), refcctx.compress(b"y" * 64)]
 
         cctx = zstd.ZstdCompressor(write_checksum=True)
 
-        if not hasattr(cctx, 'multi_compress_to_buffer'):
-            self.skipTest('multi_compress_to_buffer not available')
+        if not hasattr(cctx, "multi_compress_to_buffer"):
+            self.skipTest("multi_compress_to_buffer not available")
 
         frames = []
-        frames.extend(b'x' * 64 for i in range(256))
-        frames.extend(b'y' * 64 for i in range(256))
+        frames.extend(b"x" * 64 for i in range(256))
+        frames.extend(b"y" * 64 for i in range(256))
 
         result = cctx.multi_compress_to_buffer(frames, threads=-1)
 
--- a/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Tue Jan 21 13:14:51 2020 -0500
@@ -6,28 +6,31 @@
     import hypothesis
     import hypothesis.strategies as strategies
 except ImportError:
-    raise unittest.SkipTest('hypothesis not available')
+    raise unittest.SkipTest("hypothesis not available")
 
 import zstandard as zstd
 
-from . common import (
+from .common import (
     make_cffi,
     NonClosingBytesIO,
     random_input_data,
+    TestCase,
 )
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestCompressor_stream_reader_fuzzing(unittest.TestCase):
+class TestCompressor_stream_reader_fuzzing(TestCase):
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE))
-    def test_stream_source_read(self, original, level, source_read_size,
-                                read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+    )
+    def test_stream_source_read(self, original, level, source_read_size, read_size):
         if read_size == 0:
             read_size = -1
 
@@ -35,8 +38,9 @@
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(io.BytesIO(original), size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            io.BytesIO(original), size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 chunk = reader.read(read_size)
@@ -45,16 +49,18 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
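
For orientation, these hypothesis-driven tests all reduce to a
property-based round trip; a simplified sketch (the test name and
strategies here are illustrative, not from the patch):

    import hypothesis
    import hypothesis.strategies as strategies
    import zstandard as zstd

    @hypothesis.given(
        data=strategies.binary(),
        level=strategies.integers(min_value=1, max_value=5),
    )
    def test_roundtrip(data, level):
        frame = zstd.ZstdCompressor(level=level).compress(data)
        assert zstd.ZstdDecompressor().decompress(frame) == data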
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE))
-    def test_buffer_source_read(self, original, level, source_read_size,
-                                read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+    )
+    def test_buffer_source_read(self, original, level, source_read_size, read_size):
         if read_size == 0:
             read_size = -1
 
@@ -62,8 +68,9 @@
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(original, size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            original, size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 chunk = reader.read(read_size)
@@ -72,22 +79,30 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_sizes=strategies.data())
-    def test_stream_source_read_variance(self, original, level, source_read_size,
-                                         read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_sizes=strategies.data(),
+    )
+    def test_stream_source_read_variance(
+        self, original, level, source_read_size, read_sizes
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(io.BytesIO(original), size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            io.BytesIO(original), size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 read_size = read_sizes.draw(strategies.integers(-1, 16384))
@@ -97,23 +112,31 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_sizes=strategies.data())
-    def test_buffer_source_read_variance(self, original, level, source_read_size,
-                                         read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_sizes=strategies.data(),
+    )
+    def test_buffer_source_read_variance(
+        self, original, level, source_read_size, read_sizes
+    ):
 
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(original, size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            original, size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 read_size = read_sizes.draw(strategies.integers(-1, 16384))
@@ -123,22 +146,25 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE))
-    def test_stream_source_readinto(self, original, level,
-                                    source_read_size, read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+    )
+    def test_stream_source_readinto(self, original, level, source_read_size, read_size):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(io.BytesIO(original), size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            io.BytesIO(original), size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 b = bytearray(read_size)
@@ -149,23 +175,26 @@
 
                 chunks.append(bytes(b[0:count]))
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE))
-    def test_buffer_source_readinto(self, original, level,
-                                    source_read_size, read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+    )
+    def test_buffer_source_readinto(self, original, level, source_read_size, read_size):
 
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(original, size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            original, size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 b = bytearray(read_size)
@@ -176,22 +205,30 @@
 
                 chunks.append(bytes(b[0:count]))
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_sizes=strategies.data())
-    def test_stream_source_readinto_variance(self, original, level,
-                                             source_read_size, read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_sizes=strategies.data(),
+    )
+    def test_stream_source_readinto_variance(
+        self, original, level, source_read_size, read_sizes
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(io.BytesIO(original), size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            io.BytesIO(original), size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 read_size = read_sizes.draw(strategies.integers(1, 16384))
@@ -203,23 +240,31 @@
 
                 chunks.append(bytes(b[0:count]))
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_sizes=strategies.data())
-    def test_buffer_source_readinto_variance(self, original, level,
-                                             source_read_size, read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_sizes=strategies.data(),
+    )
+    def test_buffer_source_readinto_variance(
+        self, original, level, source_read_size, read_sizes
+    ):
 
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(original, size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            original, size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 read_size = read_sizes.draw(strategies.integers(1, 16384))
@@ -231,16 +276,18 @@
 
                 chunks.append(bytes(b[0:count]))
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE))
-    def test_stream_source_read1(self, original, level, source_read_size,
-                                 read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+    )
+    def test_stream_source_read1(self, original, level, source_read_size, read_size):
         if read_size == 0:
             read_size = -1
 
@@ -248,8 +295,9 @@
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(io.BytesIO(original), size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            io.BytesIO(original), size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 chunk = reader.read1(read_size)
@@ -258,16 +306,18 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE))
-    def test_buffer_source_read1(self, original, level, source_read_size,
-                                 read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+    )
+    def test_buffer_source_read1(self, original, level, source_read_size, read_size):
         if read_size == 0:
             read_size = -1
 
@@ -275,8 +325,9 @@
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(original, size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            original, size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 chunk = reader.read1(read_size)
@@ -285,22 +336,30 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_sizes=strategies.data())
-    def test_stream_source_read1_variance(self, original, level, source_read_size,
-                                          read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_sizes=strategies.data(),
+    )
+    def test_stream_source_read1_variance(
+        self, original, level, source_read_size, read_sizes
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(io.BytesIO(original), size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            io.BytesIO(original), size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 read_size = read_sizes.draw(strategies.integers(-1, 16384))
@@ -310,23 +369,31 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_sizes=strategies.data())
-    def test_buffer_source_read1_variance(self, original, level, source_read_size,
-                                          read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_sizes=strategies.data(),
+    )
+    def test_buffer_source_read1_variance(
+        self, original, level, source_read_size, read_sizes
+    ):
 
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(original, size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            original, size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 read_size = read_sizes.draw(strategies.integers(-1, 16384))
@@ -336,17 +403,20 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), ref_frame)
-
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE))
-    def test_stream_source_readinto1(self, original, level, source_read_size,
-                                     read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+    )
+    def test_stream_source_readinto1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -354,8 +424,9 @@
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(io.BytesIO(original), size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            io.BytesIO(original), size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 b = bytearray(read_size)
@@ -366,16 +437,20 @@
 
                 chunks.append(bytes(b[0:count]))
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE))
-    def test_buffer_source_readinto1(self, original, level, source_read_size,
-                                     read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+    )
+    def test_buffer_source_readinto1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -383,8 +458,9 @@
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(original, size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            original, size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 b = bytearray(read_size)
@@ -395,22 +471,30 @@
 
                 chunks.append(bytes(b[0:count]))
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_sizes=strategies.data())
-    def test_stream_source_readinto1_variance(self, original, level, source_read_size,
-                                              read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_sizes=strategies.data(),
+    )
+    def test_stream_source_readinto1_variance(
+        self, original, level, source_read_size, read_sizes
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(io.BytesIO(original), size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            io.BytesIO(original), size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 read_size = read_sizes.draw(strategies.integers(1, 16384))
@@ -422,23 +506,31 @@
 
                 chunks.append(bytes(b[0:count]))
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      source_read_size=strategies.integers(1, 16384),
-                      read_sizes=strategies.data())
-    def test_buffer_source_readinto1_variance(self, original, level, source_read_size,
-                                              read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        source_read_size=strategies.integers(1, 16384),
+        read_sizes=strategies.data(),
+    )
+    def test_buffer_source_readinto1_variance(
+        self, original, level, source_read_size, read_sizes
+    ):
 
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        with cctx.stream_reader(original, size=len(original),
-                                read_size=source_read_size) as reader:
+        with cctx.stream_reader(
+            original, size=len(original), read_size=source_read_size
+        ) as reader:
             chunks = []
             while True:
                 read_size = read_sizes.draw(strategies.integers(1, 16384))
@@ -450,35 +542,40 @@
 
                 chunks.append(bytes(b[0:count]))
 
-        self.assertEqual(b''.join(chunks), ref_frame)
-
+        self.assertEqual(b"".join(chunks), ref_frame)
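For reference, the readinto1() pattern these fuzz tests stress looks like this outside the harness (a minimal sketch; the input, level, and buffer size are arbitrary):

```python
import io
import zstandard as zstd

data = b"data to compress" * 64
cctx = zstd.ZstdCompressor(level=3)

chunks = []
# stream_reader emits compressed bytes; readinto1 performs at most one
# read against the underlying source per call and returns the byte count.
with cctx.stream_reader(io.BytesIO(data), size=len(data)) as reader:
    while True:
        buf = bytearray(4096)
        count = reader.readinto1(buf)
        if not count:
            break
        chunks.append(bytes(buf[:count]))

# size= was given above, so the frame records its content size and a
# plain decompress() round trip works.
assert zstd.ZstdDecompressor().decompress(b"".join(chunks)) == data
```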
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestCompressor_stream_writer_fuzzing(unittest.TestCase):
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                        level=strategies.integers(min_value=1, max_value=5),
-                        write_size=strategies.integers(min_value=1, max_value=1048576))
+class TestCompressor_stream_writer_fuzzing(TestCase):
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        write_size=strategies.integers(min_value=1, max_value=1048576),
+    )
     def test_write_size_variance(self, original, level, write_size):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
 
         cctx = zstd.ZstdCompressor(level=level)
         b = NonClosingBytesIO()
-        with cctx.stream_writer(b, size=len(original), write_size=write_size) as compressor:
+        with cctx.stream_writer(
+            b, size=len(original), write_size=write_size
+        ) as compressor:
             compressor.write(original)
 
         self.assertEqual(b.getvalue(), ref_frame)
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestCompressor_copy_stream_fuzzing(unittest.TestCase):
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      read_size=strategies.integers(min_value=1, max_value=1048576),
-                      write_size=strategies.integers(min_value=1, max_value=1048576))
+class TestCompressor_copy_stream_fuzzing(TestCase):
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        read_size=strategies.integers(min_value=1, max_value=1048576),
+        write_size=strategies.integers(min_value=1, max_value=1048576),
+    )
     def test_read_write_size_variance(self, original, level, read_size, write_size):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
@@ -487,20 +584,27 @@
         source = io.BytesIO(original)
         dest = io.BytesIO()
 
-        cctx.copy_stream(source, dest, size=len(original), read_size=read_size,
-                         write_size=write_size)
+        cctx.copy_stream(
+            source, dest, size=len(original), read_size=read_size, write_size=write_size
+        )
 
         self.assertEqual(dest.getvalue(), ref_frame)
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestCompressor_compressobj_fuzzing(unittest.TestCase):
+class TestCompressor_compressobj_fuzzing(TestCase):
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      chunk_sizes=strategies.data())
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        chunk_sizes=strategies.data(),
+    )
     def test_random_input_sizes(self, original, level, chunk_sizes):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)
@@ -512,7 +616,7 @@
         i = 0
         while True:
             chunk_size = chunk_sizes.draw(strategies.integers(1, 4096))
-            source = original[i:i + chunk_size]
+            source = original[i : i + chunk_size]
             if not source:
                 break
 
@@ -521,14 +625,20 @@
 
         chunks.append(cobj.flush())
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      chunk_sizes=strategies.data(),
-                      flushes=strategies.data())
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        chunk_sizes=strategies.data(),
+        flushes=strategies.data(),
+    )
     def test_flush_block(self, original, level, chunk_sizes, flushes):
         cctx = zstd.ZstdCompressor(level=level)
         cobj = cctx.compressobj()
@@ -541,7 +651,7 @@
         i = 0
         while True:
             input_size = chunk_sizes.draw(strategies.integers(1, 4096))
-            source = original[i:i + input_size]
+            source = original[i : i + input_size]
             if not source:
                 break
 
@@ -558,24 +668,28 @@
             compressed_chunks.append(chunk)
             decompressed_chunks.append(dobj.decompress(chunk))
 
-            self.assertEqual(b''.join(decompressed_chunks), original[0:i])
+            self.assertEqual(b"".join(decompressed_chunks), original[0:i])
 
         chunk = cobj.flush(zstd.COMPRESSOBJ_FLUSH_FINISH)
         compressed_chunks.append(chunk)
         decompressed_chunks.append(dobj.decompress(chunk))
 
-        self.assertEqual(dctx.decompress(b''.join(compressed_chunks),
-                                         max_output_size=len(original)),
-                         original)
-        self.assertEqual(b''.join(decompressed_chunks), original)
+        self.assertEqual(
+            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            original,
+        )
+        self.assertEqual(b"".join(decompressed_chunks), original)
+
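The flush semantics being fuzzed here can be seen in isolation with a plain compressobj (a sketch; the inputs and the output bound are arbitrary):

```python
import zstandard as zstd

cobj = zstd.ZstdCompressor(level=3).compressobj()

# FLUSH_BLOCK makes everything written so far decodable by a streaming
# decompressor without ending the frame; FLUSH_FINISH ends the frame.
out = cobj.compress(b"first chunk of data")
out += cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
out += cobj.compress(b"second chunk")
out += cobj.flush(zstd.COMPRESSOBJ_FLUSH_FINISH)

# No size was declared up front, so the frame carries no content size and
# decompress() needs an explicit output bound.
dctx = zstd.ZstdDecompressor()
assert dctx.decompress(out, max_output_size=1024) == (
    b"first chunk of datasecond chunk"
)
```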
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestCompressor_read_to_iter_fuzzing(unittest.TestCase):
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      read_size=strategies.integers(min_value=1, max_value=4096),
-                      write_size=strategies.integers(min_value=1, max_value=4096))
+class TestCompressor_read_to_iter_fuzzing(TestCase):
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        read_size=strategies.integers(min_value=1, max_value=4096),
+        write_size=strategies.integers(min_value=1, max_value=4096),
+    )
     def test_read_write_size_variance(self, original, level, read_size, write_size):
         refcctx = zstd.ZstdCompressor(level=level)
         ref_frame = refcctx.compress(original)
@@ -583,32 +697,35 @@
         source = io.BytesIO(original)
 
         cctx = zstd.ZstdCompressor(level=level)
-        chunks = list(cctx.read_to_iter(source, size=len(original),
-                                        read_size=read_size,
-                                        write_size=write_size))
+        chunks = list(
+            cctx.read_to_iter(
+                source, size=len(original), read_size=read_size, write_size=write_size
+            )
+        )
 
-        self.assertEqual(b''.join(chunks), ref_frame)
+        self.assertEqual(b"".join(chunks), ref_frame)
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
-class TestCompressor_multi_compress_to_buffer_fuzzing(unittest.TestCase):
-    @hypothesis.given(original=strategies.lists(strategies.sampled_from(random_input_data()),
-                                                min_size=1, max_size=1024),
-                        threads=strategies.integers(min_value=1, max_value=8),
-                        use_dict=strategies.booleans())
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
+class TestCompressor_multi_compress_to_buffer_fuzzing(TestCase):
+    @hypothesis.given(
+        original=strategies.lists(
+            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+        ),
+        threads=strategies.integers(min_value=1, max_value=8),
+        use_dict=strategies.booleans(),
+    )
     def test_data_equivalence(self, original, threads, use_dict):
         kwargs = {}
 
         # Use a content dictionary because it is cheap to create.
         if use_dict:
-            kwargs['dict_data'] = zstd.ZstdCompressionDict(original[0])
+            kwargs["dict_data"] = zstd.ZstdCompressionDict(original[0])
 
-        cctx = zstd.ZstdCompressor(level=1,
-                                   write_checksum=True,
-                                   **kwargs)
+        cctx = zstd.ZstdCompressor(level=1, write_checksum=True, **kwargs)
 
-        if not hasattr(cctx, 'multi_compress_to_buffer'):
-            self.skipTest('multi_compress_to_buffer not available')
+        if not hasattr(cctx, "multi_compress_to_buffer"):
+            self.skipTest("multi_compress_to_buffer not available")
 
         result = cctx.multi_compress_to_buffer(original, threads=-1)
 
@@ -624,17 +741,21 @@
             self.assertEqual(dctx.decompress(frame), original[i])
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestCompressor_chunker_fuzzing(unittest.TestCase):
+class TestCompressor_chunker_fuzzing(TestCase):
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      chunk_size=strategies.integers(
-                          min_value=1,
-                          max_value=32 * 1048576),
-                      input_sizes=strategies.data())
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        chunk_size=strategies.integers(min_value=1, max_value=32 * 1048576),
+        input_sizes=strategies.data(),
+    )
     def test_random_input_sizes(self, original, level, chunk_size, input_sizes):
         cctx = zstd.ZstdCompressor(level=level)
         chunker = cctx.chunker(chunk_size=chunk_size)
@@ -643,7 +764,7 @@
         i = 0
         while True:
             input_size = input_sizes.draw(strategies.integers(1, 4096))
-            source = original[i:i + input_size]
+            source = original[i : i + input_size]
             if not source:
                 break
 
@@ -654,23 +775,26 @@
 
         dctx = zstd.ZstdDecompressor()
 
-        self.assertEqual(dctx.decompress(b''.join(chunks),
-                                         max_output_size=len(original)),
-                         original)
+        self.assertEqual(
+            dctx.decompress(b"".join(chunks), max_output_size=len(original)), original
+        )
 
         self.assertTrue(all(len(chunk) == chunk_size for chunk in chunks[:-1]))
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      chunk_size=strategies.integers(
-                          min_value=1,
-                          max_value=32 * 1048576),
-                      input_sizes=strategies.data(),
-                      flushes=strategies.data())
-    def test_flush_block(self, original, level, chunk_size, input_sizes,
-                         flushes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        chunk_size=strategies.integers(min_value=1, max_value=32 * 1048576),
+        input_sizes=strategies.data(),
+        flushes=strategies.data(),
+    )
+    def test_flush_block(self, original, level, chunk_size, input_sizes, flushes):
         cctx = zstd.ZstdCompressor(level=level)
         chunker = cctx.chunker(chunk_size=chunk_size)
 
@@ -682,7 +806,7 @@
         i = 0
         while True:
             input_size = input_sizes.draw(strategies.integers(1, 4096))
-            source = original[i:i + input_size]
+            source = original[i : i + input_size]
             if not source:
                 break
 
@@ -690,22 +814,23 @@
 
             chunks = list(chunker.compress(source))
             compressed_chunks.extend(chunks)
-            decompressed_chunks.append(dobj.decompress(b''.join(chunks)))
+            decompressed_chunks.append(dobj.decompress(b"".join(chunks)))
 
             if not flushes.draw(strategies.booleans()):
                 continue
 
             chunks = list(chunker.flush())
             compressed_chunks.extend(chunks)
-            decompressed_chunks.append(dobj.decompress(b''.join(chunks)))
+            decompressed_chunks.append(dobj.decompress(b"".join(chunks)))
 
-            self.assertEqual(b''.join(decompressed_chunks), original[0:i])
+            self.assertEqual(b"".join(decompressed_chunks), original[0:i])
 
         chunks = list(chunker.finish())
         compressed_chunks.extend(chunks)
-        decompressed_chunks.append(dobj.decompress(b''.join(chunks)))
+        decompressed_chunks.append(dobj.decompress(b"".join(chunks)))
 
-        self.assertEqual(dctx.decompress(b''.join(compressed_chunks),
-                                         max_output_size=len(original)),
-                         original)
-        self.assertEqual(b''.join(decompressed_chunks), original)
\ No newline at end of file
+        self.assertEqual(
+            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            original,
+        )
+        self.assertEqual(b"".join(decompressed_chunks), original)
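The chunker API exercised by the last two tests emits fixed-size compressed chunks; roughly (a sketch using incompressible input so several full chunks are produced):

```python
import os
import zstandard as zstd

data = os.urandom(100000)  # incompressible, keeps the output large

chunker = zstd.ZstdCompressor(level=3).chunker(chunk_size=16384)
chunks = list(chunker.compress(data))
chunks.extend(chunker.finish())

# Every chunk except the final one is exactly chunk_size bytes.
assert all(len(c) == 16384 for c in chunks[:-1])

dctx = zstd.ZstdDecompressor()
assert dctx.decompress(b"".join(chunks), max_output_size=len(data)) == data
```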
--- a/contrib/python-zstandard/tests/test_data_structures.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Tue Jan 21 13:14:51 2020 -0500
@@ -3,29 +3,34 @@
 
 import zstandard as zstd
 
-from . common import (
+from .common import (
     make_cffi,
+    TestCase,
 )
 
 
 @make_cffi
-class TestCompressionParameters(unittest.TestCase):
+class TestCompressionParameters(TestCase):
     def test_bounds(self):
-        zstd.ZstdCompressionParameters(window_log=zstd.WINDOWLOG_MIN,
-                                       chain_log=zstd.CHAINLOG_MIN,
-                                       hash_log=zstd.HASHLOG_MIN,
-                                       search_log=zstd.SEARCHLOG_MIN,
-                                       min_match=zstd.MINMATCH_MIN + 1,
-                                       target_length=zstd.TARGETLENGTH_MIN,
-                                       strategy=zstd.STRATEGY_FAST)
+        zstd.ZstdCompressionParameters(
+            window_log=zstd.WINDOWLOG_MIN,
+            chain_log=zstd.CHAINLOG_MIN,
+            hash_log=zstd.HASHLOG_MIN,
+            search_log=zstd.SEARCHLOG_MIN,
+            min_match=zstd.MINMATCH_MIN + 1,
+            target_length=zstd.TARGETLENGTH_MIN,
+            strategy=zstd.STRATEGY_FAST,
+        )
 
-        zstd.ZstdCompressionParameters(window_log=zstd.WINDOWLOG_MAX,
-                                       chain_log=zstd.CHAINLOG_MAX,
-                                       hash_log=zstd.HASHLOG_MAX,
-                                       search_log=zstd.SEARCHLOG_MAX,
-                                       min_match=zstd.MINMATCH_MAX - 1,
-                                       target_length=zstd.TARGETLENGTH_MAX,
-                                       strategy=zstd.STRATEGY_BTULTRA2)
+        zstd.ZstdCompressionParameters(
+            window_log=zstd.WINDOWLOG_MAX,
+            chain_log=zstd.CHAINLOG_MAX,
+            hash_log=zstd.HASHLOG_MAX,
+            search_log=zstd.SEARCHLOG_MAX,
+            min_match=zstd.MINMATCH_MAX - 1,
+            target_length=zstd.TARGETLENGTH_MAX,
+            strategy=zstd.STRATEGY_BTULTRA2,
+        )
 
     def test_from_level(self):
         p = zstd.ZstdCompressionParameters.from_level(1)
@@ -37,13 +42,15 @@
         self.assertEqual(p.window_log, 19)
 
     def test_members(self):
-        p = zstd.ZstdCompressionParameters(window_log=10,
-                                           chain_log=6,
-                                           hash_log=7,
-                                           search_log=4,
-                                           min_match=5,
-                                           target_length=8,
-                                           strategy=1)
+        p = zstd.ZstdCompressionParameters(
+            window_log=10,
+            chain_log=6,
+            hash_log=7,
+            search_log=4,
+            min_match=5,
+            target_length=8,
+            strategy=1,
+        )
         self.assertEqual(p.window_log, 10)
         self.assertEqual(p.chain_log, 6)
         self.assertEqual(p.hash_log, 7)
@@ -58,8 +65,7 @@
         p = zstd.ZstdCompressionParameters(threads=4)
         self.assertEqual(p.threads, 4)
 
-        p = zstd.ZstdCompressionParameters(threads=2, job_size=1048576,
-                                           overlap_log=6)
+        p = zstd.ZstdCompressionParameters(threads=2, job_size=1048576, overlap_log=6)
         self.assertEqual(p.threads, 2)
         self.assertEqual(p.job_size, 1048576)
         self.assertEqual(p.overlap_log, 6)
@@ -91,20 +97,25 @@
         self.assertEqual(p.ldm_hash_rate_log, 8)
 
     def test_estimated_compression_context_size(self):
-        p = zstd.ZstdCompressionParameters(window_log=20,
-                                           chain_log=16,
-                                           hash_log=17,
-                                           search_log=1,
-                                           min_match=5,
-                                           target_length=16,
-                                           strategy=zstd.STRATEGY_DFAST)
+        p = zstd.ZstdCompressionParameters(
+            window_log=20,
+            chain_log=16,
+            hash_log=17,
+            search_log=1,
+            min_match=5,
+            target_length=16,
+            strategy=zstd.STRATEGY_DFAST,
+        )
 
         # 32-bit has slightly different values from 64-bit.
-        self.assertAlmostEqual(p.estimated_compression_context_size(), 1294144,
-                               delta=250)
+        self.assertAlmostEqual(
+            p.estimated_compression_context_size(), 1294464, delta=400
+        )
 
     def test_strategy(self):
-        with self.assertRaisesRegexp(ValueError, 'cannot specify both compression_strategy'):
+        with self.assertRaisesRegex(
+            ValueError, "cannot specify both compression_strategy"
+        ):
             zstd.ZstdCompressionParameters(strategy=0, compression_strategy=0)
 
         p = zstd.ZstdCompressionParameters(strategy=2)
@@ -114,7 +125,9 @@
         self.assertEqual(p.compression_strategy, 3)
 
     def test_ldm_hash_rate_log(self):
-        with self.assertRaisesRegexp(ValueError, 'cannot specify both ldm_hash_rate_log'):
+        with self.assertRaisesRegex(
+            ValueError, "cannot specify both ldm_hash_rate_log"
+        ):
             zstd.ZstdCompressionParameters(ldm_hash_rate_log=8, ldm_hash_every_log=4)
 
         p = zstd.ZstdCompressionParameters(ldm_hash_rate_log=8)
@@ -124,7 +137,7 @@
         self.assertEqual(p.ldm_hash_every_log, 16)
 
     def test_overlap_log(self):
-        with self.assertRaisesRegexp(ValueError, 'cannot specify both overlap_log'):
+        with self.assertRaisesRegex(ValueError, "cannot specify both overlap_log"):
             zstd.ZstdCompressionParameters(overlap_log=1, overlap_size_log=9)
 
         p = zstd.ZstdCompressionParameters(overlap_log=2)
@@ -137,7 +150,7 @@
 
 
 @make_cffi
-class TestFrameParameters(unittest.TestCase):
+class TestFrameParameters(TestCase):
     def test_invalid_type(self):
         with self.assertRaises(TypeError):
             zstd.get_frame_parameters(None)
@@ -145,71 +158,71 @@
         # Python 3 doesn't appear to convert unicode to Py_buffer.
         if sys.version_info[0] >= 3:
             with self.assertRaises(TypeError):
-                zstd.get_frame_parameters(u'foobarbaz')
+                zstd.get_frame_parameters(u"foobarbaz")
         else:
             # CPython will convert unicode to Py_buffer. But CFFI won't.
-            if zstd.backend == 'cffi':
+            if zstd.backend == "cffi":
                 with self.assertRaises(TypeError):
-                    zstd.get_frame_parameters(u'foobarbaz')
+                    zstd.get_frame_parameters(u"foobarbaz")
             else:
                 with self.assertRaises(zstd.ZstdError):
-                    zstd.get_frame_parameters(u'foobarbaz')
+                    zstd.get_frame_parameters(u"foobarbaz")
 
     def test_invalid_input_sizes(self):
-        with self.assertRaisesRegexp(zstd.ZstdError, 'not enough data for frame'):
-            zstd.get_frame_parameters(b'')
+        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+            zstd.get_frame_parameters(b"")
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'not enough data for frame'):
+        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
             zstd.get_frame_parameters(zstd.FRAME_HEADER)
 
     def test_invalid_frame(self):
-        with self.assertRaisesRegexp(zstd.ZstdError, 'Unknown frame descriptor'):
-            zstd.get_frame_parameters(b'foobarbaz')
+        with self.assertRaisesRegex(zstd.ZstdError, "Unknown frame descriptor"):
+            zstd.get_frame_parameters(b"foobarbaz")
 
     def test_attributes(self):
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x00\x00')
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x00\x00")
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
         self.assertEqual(params.window_size, 1024)
         self.assertEqual(params.dict_id, 0)
         self.assertFalse(params.has_checksum)
 
         # The lowest 2 bits indicate whether a dictionary ID is present and how long it is. Here, the dict id is 1 byte.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x01\x00\xff')
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x01\x00\xff")
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
         self.assertEqual(params.window_size, 1024)
         self.assertEqual(params.dict_id, 255)
         self.assertFalse(params.has_checksum)
 
         # The third-lowest bit indicates whether a checksum is present.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x04\x00')
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x04\x00")
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
         self.assertEqual(params.window_size, 1024)
         self.assertEqual(params.dict_id, 0)
         self.assertTrue(params.has_checksum)
 
         # The upper 2 bits indicate the size of the content size field.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x40\x00\xff\x00')
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x40\x00\xff\x00")
         self.assertEqual(params.content_size, 511)
         self.assertEqual(params.window_size, 1024)
         self.assertEqual(params.dict_id, 0)
         self.assertFalse(params.has_checksum)
 
         # Window descriptor is 2nd byte after frame header.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x00\x40')
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x00\x40")
         self.assertEqual(params.content_size, zstd.CONTENTSIZE_UNKNOWN)
         self.assertEqual(params.window_size, 262144)
         self.assertEqual(params.dict_id, 0)
         self.assertFalse(params.has_checksum)
 
         # Set multiple things.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x45\x40\x0f\x10\x00')
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00")
         self.assertEqual(params.content_size, 272)
         self.assertEqual(params.window_size, 262144)
         self.assertEqual(params.dict_id, 15)
         self.assertTrue(params.has_checksum)
 
     def test_input_types(self):
-        v = zstd.FRAME_HEADER + b'\x00\x00'
+        v = zstd.FRAME_HEADER + b"\x00\x00"
 
         mutable_array = bytearray(len(v))
         mutable_array[:] = v
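The flag bytes asserted in test_attributes follow the zstd frame format: the low 2 bits of the frame header descriptor encode the dictionary-ID field size, bit 2 is the checksum flag, and the top 2 bits encode the content-size field size. The same attributes can be read off a real frame (a minimal sketch):

```python
import zstandard as zstd

frame = zstd.ZstdCompressor(write_checksum=True).compress(b"hello")

params = zstd.get_frame_parameters(frame)
print(params.content_size)   # 5
print(params.window_size)    # derived from the window descriptor byte
print(params.dict_id)        # 0: no dictionary was used
print(params.has_checksum)   # True
```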
--- a/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Tue Jan 21 13:14:51 2020 -0500
@@ -7,70 +7,99 @@
     import hypothesis
     import hypothesis.strategies as strategies
 except ImportError:
-    raise unittest.SkipTest('hypothesis not available')
+    raise unittest.SkipTest("hypothesis not available")
 
 import zstandard as zstd
 
 from .common import (
     make_cffi,
+    TestCase,
+)
+
+
+s_windowlog = strategies.integers(
+    min_value=zstd.WINDOWLOG_MIN, max_value=zstd.WINDOWLOG_MAX
+)
+s_chainlog = strategies.integers(
+    min_value=zstd.CHAINLOG_MIN, max_value=zstd.CHAINLOG_MAX
+)
+s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX)
+s_searchlog = strategies.integers(
+    min_value=zstd.SEARCHLOG_MIN, max_value=zstd.SEARCHLOG_MAX
+)
+s_minmatch = strategies.integers(
+    min_value=zstd.MINMATCH_MIN, max_value=zstd.MINMATCH_MAX
+)
+s_targetlength = strategies.integers(
+    min_value=zstd.TARGETLENGTH_MIN, max_value=zstd.TARGETLENGTH_MAX
+)
+s_strategy = strategies.sampled_from(
+    (
+        zstd.STRATEGY_FAST,
+        zstd.STRATEGY_DFAST,
+        zstd.STRATEGY_GREEDY,
+        zstd.STRATEGY_LAZY,
+        zstd.STRATEGY_LAZY2,
+        zstd.STRATEGY_BTLAZY2,
+        zstd.STRATEGY_BTOPT,
+        zstd.STRATEGY_BTULTRA,
+        zstd.STRATEGY_BTULTRA2,
+    )
 )
 
 
-s_windowlog = strategies.integers(min_value=zstd.WINDOWLOG_MIN,
-                                    max_value=zstd.WINDOWLOG_MAX)
-s_chainlog = strategies.integers(min_value=zstd.CHAINLOG_MIN,
-                                    max_value=zstd.CHAINLOG_MAX)
-s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN,
-                                max_value=zstd.HASHLOG_MAX)
-s_searchlog = strategies.integers(min_value=zstd.SEARCHLOG_MIN,
-                                    max_value=zstd.SEARCHLOG_MAX)
-s_minmatch = strategies.integers(min_value=zstd.MINMATCH_MIN,
-                                 max_value=zstd.MINMATCH_MAX)
-s_targetlength = strategies.integers(min_value=zstd.TARGETLENGTH_MIN,
-                                     max_value=zstd.TARGETLENGTH_MAX)
-s_strategy = strategies.sampled_from((zstd.STRATEGY_FAST,
-                                        zstd.STRATEGY_DFAST,
-                                        zstd.STRATEGY_GREEDY,
-                                        zstd.STRATEGY_LAZY,
-                                        zstd.STRATEGY_LAZY2,
-                                        zstd.STRATEGY_BTLAZY2,
-                                        zstd.STRATEGY_BTOPT,
-                                        zstd.STRATEGY_BTULTRA,
-                                        zstd.STRATEGY_BTULTRA2))
-
+@make_cffi
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
+class TestCompressionParametersHypothesis(TestCase):
+    @hypothesis.given(
+        s_windowlog,
+        s_chainlog,
+        s_hashlog,
+        s_searchlog,
+        s_minmatch,
+        s_targetlength,
+        s_strategy,
+    )
+    def test_valid_init(
+        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+    ):
+        zstd.ZstdCompressionParameters(
+            window_log=windowlog,
+            chain_log=chainlog,
+            hash_log=hashlog,
+            search_log=searchlog,
+            min_match=minmatch,
+            target_length=targetlength,
+            strategy=strategy,
+        )
 
-@make_cffi
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
-class TestCompressionParametersHypothesis(unittest.TestCase):
-    @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog,
-                        s_minmatch, s_targetlength, s_strategy)
-    def test_valid_init(self, windowlog, chainlog, hashlog, searchlog,
-                        minmatch, targetlength, strategy):
-        zstd.ZstdCompressionParameters(window_log=windowlog,
-                                       chain_log=chainlog,
-                                       hash_log=hashlog,
-                                       search_log=searchlog,
-                                       min_match=minmatch,
-                                       target_length=targetlength,
-                                       strategy=strategy)
-
-    @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog,
-                      s_minmatch, s_targetlength, s_strategy)
-    def test_estimated_compression_context_size(self, windowlog, chainlog,
-                                                hashlog, searchlog,
-                                                minmatch, targetlength,
-                                                strategy):
-        if minmatch == zstd.MINMATCH_MIN and strategy in (zstd.STRATEGY_FAST, zstd.STRATEGY_GREEDY):
+    @hypothesis.given(
+        s_windowlog,
+        s_chainlog,
+        s_hashlog,
+        s_searchlog,
+        s_minmatch,
+        s_targetlength,
+        s_strategy,
+    )
+    def test_estimated_compression_context_size(
+        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+    ):
+        if minmatch == zstd.MINMATCH_MIN and strategy in (
+            zstd.STRATEGY_FAST,
+            zstd.STRATEGY_GREEDY,
+        ):
             minmatch += 1
         elif minmatch == zstd.MINMATCH_MAX and strategy != zstd.STRATEGY_FAST:
             minmatch -= 1
 
-        p = zstd.ZstdCompressionParameters(window_log=windowlog,
-                                           chain_log=chainlog,
-                                           hash_log=hashlog,
-                                           search_log=searchlog,
-                                           min_match=minmatch,
-                                           target_length=targetlength,
-                                           strategy=strategy)
+        p = zstd.ZstdCompressionParameters(
+            window_log=windowlog,
+            chain_log=chainlog,
+            hash_log=hashlog,
+            search_log=searchlog,
+            min_match=minmatch,
+            target_length=targetlength,
+            strategy=strategy,
+        )
         size = p.estimated_compression_context_size()
-
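All of these fuzz tests share the same hypothesis shape: strategies describe the parameter space and the decorated function runs once per drawn example. A self-contained illustration of the pattern (not part of the suite; the strategy bounds are arbitrary):

```python
import hypothesis
import hypothesis.strategies as strategies

import zstandard as zstd


@hypothesis.given(
    level=strategies.integers(min_value=1, max_value=5),
    data=strategies.binary(max_size=8192),
)
def check_roundtrip(level, data):
    frame = zstd.ZstdCompressor(level=level).compress(data)
    assert zstd.ZstdDecompressor().decompress(frame) == data


check_roundtrip()  # hypothesis drives many (level, data) examples
```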
--- a/contrib/python-zstandard/tests/test_decompressor.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_decompressor.py	Tue Jan 21 13:14:51 2020 -0500
@@ -13,6 +13,7 @@
     make_cffi,
     NonClosingBytesIO,
     OpCountingBytesIO,
+    TestCase,
 )
 
 
@@ -23,62 +24,67 @@
 
 
 @make_cffi
-class TestFrameHeaderSize(unittest.TestCase):
+class TestFrameHeaderSize(TestCase):
     def test_empty(self):
-        with self.assertRaisesRegexp(
-            zstd.ZstdError, 'could not determine frame header size: Src size '
-                            'is incorrect'):
-            zstd.frame_header_size(b'')
+        with self.assertRaisesRegex(
+            zstd.ZstdError,
+            "could not determine frame header size: Src size " "is incorrect",
+        ):
+            zstd.frame_header_size(b"")
 
     def test_too_small(self):
-        with self.assertRaisesRegexp(
-            zstd.ZstdError, 'could not determine frame header size: Src size '
-                            'is incorrect'):
-            zstd.frame_header_size(b'foob')
+        with self.assertRaisesRegex(
+            zstd.ZstdError,
+            "could not determine frame header size: Src size " "is incorrect",
+        ):
+            zstd.frame_header_size(b"foob")
 
     def test_basic(self):
         # It doesn't matter that it isn't a valid frame.
-        self.assertEqual(zstd.frame_header_size(b'long enough but no magic'), 6)
+        self.assertEqual(zstd.frame_header_size(b"long enough but no magic"), 6)
 
 
 @make_cffi
-class TestFrameContentSize(unittest.TestCase):
+class TestFrameContentSize(TestCase):
     def test_empty(self):
-        with self.assertRaisesRegexp(zstd.ZstdError,
-                                     'error when determining content size'):
-            zstd.frame_content_size(b'')
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "error when determining content size"
+        ):
+            zstd.frame_content_size(b"")
 
     def test_too_small(self):
-        with self.assertRaisesRegexp(zstd.ZstdError,
-                                     'error when determining content size'):
-            zstd.frame_content_size(b'foob')
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "error when determining content size"
+        ):
+            zstd.frame_content_size(b"foob")
 
     def test_bad_frame(self):
-        with self.assertRaisesRegexp(zstd.ZstdError,
-                                     'error when determining content size'):
-            zstd.frame_content_size(b'invalid frame header')
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "error when determining content size"
+        ):
+            zstd.frame_content_size(b"invalid frame header")
 
     def test_unknown(self):
         cctx = zstd.ZstdCompressor(write_content_size=False)
-        frame = cctx.compress(b'foobar')
+        frame = cctx.compress(b"foobar")
 
         self.assertEqual(zstd.frame_content_size(frame), -1)
 
     def test_empty_frame(self):
         cctx = zstd.ZstdCompressor()
-        frame = cctx.compress(b'')
+        frame = cctx.compress(b"")
 
         self.assertEqual(zstd.frame_content_size(frame), 0)
 
     def test_basic(self):
         cctx = zstd.ZstdCompressor()
-        frame = cctx.compress(b'foobar')
+        frame = cctx.compress(b"foobar")
 
         self.assertEqual(zstd.frame_content_size(frame), 6)
 
 
 @make_cffi
-class TestDecompressor(unittest.TestCase):
+class TestDecompressor(TestCase):
     def test_memory_size(self):
         dctx = zstd.ZstdDecompressor()
 
@@ -86,22 +92,26 @@
 
 
 @make_cffi
-class TestDecompressor_decompress(unittest.TestCase):
+class TestDecompressor_decompress(TestCase):
     def test_empty_input(self):
         dctx = zstd.ZstdDecompressor()
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'error determining content size from frame header'):
-            dctx.decompress(b'')
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "error determining content size from frame header"
+        ):
+            dctx.decompress(b"")
 
     def test_invalid_input(self):
         dctx = zstd.ZstdDecompressor()
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'error determining content size from frame header'):
-            dctx.decompress(b'foobar')
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "error determining content size from frame header"
+        ):
+            dctx.decompress(b"foobar")
 
     def test_input_types(self):
         cctx = zstd.ZstdCompressor(level=1)
-        compressed = cctx.compress(b'foo')
+        compressed = cctx.compress(b"foo")
 
         mutable_array = bytearray(len(compressed))
         mutable_array[:] = compressed
@@ -114,36 +124,38 @@
 
         dctx = zstd.ZstdDecompressor()
         for source in sources:
-            self.assertEqual(dctx.decompress(source), b'foo')
+            self.assertEqual(dctx.decompress(source), b"foo")
 
     def test_no_content_size_in_frame(self):
         cctx = zstd.ZstdCompressor(write_content_size=False)
-        compressed = cctx.compress(b'foobar')
+        compressed = cctx.compress(b"foobar")
 
         dctx = zstd.ZstdDecompressor()
-        with self.assertRaisesRegexp(zstd.ZstdError, 'could not determine content size in frame header'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "could not determine content size in frame header"
+        ):
             dctx.decompress(compressed)
 
     def test_content_size_present(self):
         cctx = zstd.ZstdCompressor()
-        compressed = cctx.compress(b'foobar')
+        compressed = cctx.compress(b"foobar")
 
         dctx = zstd.ZstdDecompressor()
         decompressed = dctx.decompress(compressed)
-        self.assertEqual(decompressed, b'foobar')
+        self.assertEqual(decompressed, b"foobar")
 
     def test_empty_roundtrip(self):
         cctx = zstd.ZstdCompressor()
-        compressed = cctx.compress(b'')
+        compressed = cctx.compress(b"")
 
         dctx = zstd.ZstdDecompressor()
         decompressed = dctx.decompress(compressed)
 
-        self.assertEqual(decompressed, b'')
+        self.assertEqual(decompressed, b"")
 
     def test_max_output_size(self):
         cctx = zstd.ZstdCompressor(write_content_size=False)
-        source = b'foobar' * 256
+        source = b"foobar" * 256
         compressed = cctx.compress(source)
 
         dctx = zstd.ZstdDecompressor()
@@ -152,8 +164,9 @@
         self.assertEqual(decompressed, source)
 
         # Input size - 1 fails
-        with self.assertRaisesRegexp(zstd.ZstdError,
-                'decompression error: did not decompress full frame'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "decompression error: did not decompress full frame"
+        ):
             dctx.decompress(compressed, max_output_size=len(source) - 1)
 
         # Input size + 1 works
@@ -166,24 +179,24 @@
 
     def test_stupidly_large_output_buffer(self):
         cctx = zstd.ZstdCompressor(write_content_size=False)
-        compressed = cctx.compress(b'foobar' * 256)
+        compressed = cctx.compress(b"foobar" * 256)
         dctx = zstd.ZstdDecompressor()
 
         # Will get OverflowError on some Python distributions that can't
         # handle really large integers.
         with self.assertRaises((MemoryError, OverflowError)):
-            dctx.decompress(compressed, max_output_size=2**62)
+            dctx.decompress(compressed, max_output_size=2 ** 62)
 
     def test_dictionary(self):
         samples = []
         for i in range(128):
-            samples.append(b'foo' * 64)
-            samples.append(b'bar' * 64)
-            samples.append(b'foobar' * 64)
+            samples.append(b"foo" * 64)
+            samples.append(b"bar" * 64)
+            samples.append(b"foobar" * 64)
 
         d = zstd.train_dictionary(8192, samples)
 
-        orig = b'foobar' * 16384
+        orig = b"foobar" * 16384
         cctx = zstd.ZstdCompressor(level=1, dict_data=d)
         compressed = cctx.compress(orig)
 
@@ -195,13 +208,13 @@
     def test_dictionary_multiple(self):
         samples = []
         for i in range(128):
-            samples.append(b'foo' * 64)
-            samples.append(b'bar' * 64)
-            samples.append(b'foobar' * 64)
+            samples.append(b"foo" * 64)
+            samples.append(b"bar" * 64)
+            samples.append(b"foobar" * 64)
 
         d = zstd.train_dictionary(8192, samples)
 
-        sources = (b'foobar' * 8192, b'foo' * 8192, b'bar' * 8192)
+        sources = (b"foobar" * 8192, b"foo" * 8192, b"bar" * 8192)
         compressed = []
         cctx = zstd.ZstdCompressor(level=1, dict_data=d)
         for source in sources:
@@ -213,7 +226,7 @@
             self.assertEqual(decompressed, sources[i])
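Outside the test harness, the dictionary round trip above reduces to (a sketch; sample counts and sizes are arbitrary):

```python
import zstandard as zstd

samples = [b"foo" * 64, b"bar" * 64, b"foobar" * 64] * 128
d = zstd.train_dictionary(8192, samples)

cctx = zstd.ZstdCompressor(level=1, dict_data=d)
frame = cctx.compress(b"foobar" * 1024)

# The same dictionary must be supplied to decompress the frame.
dctx = zstd.ZstdDecompressor(dict_data=d)
assert dctx.decompress(frame) == b"foobar" * 1024
```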
 
     def test_max_window_size(self):
-        with open(__file__, 'rb') as fh:
+        with open(__file__, "rb") as fh:
             source = fh.read()
 
         # If we write a content size, the decompressor engages single pass
@@ -221,15 +234,16 @@
         cctx = zstd.ZstdCompressor(write_content_size=False)
         frame = cctx.compress(source)
 
-        dctx = zstd.ZstdDecompressor(max_window_size=2**zstd.WINDOWLOG_MIN)
+        dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)
 
-        with self.assertRaisesRegexp(
-            zstd.ZstdError, 'decompression error: Frame requires too much memory'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "decompression error: Frame requires too much memory"
+        ):
             dctx.decompress(frame, max_output_size=len(source))
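When a frame records its content size the decompressor decodes in a single pass and max_window_size does not come into play, which is why the test compresses with write_content_size=False. A standalone sketch of the same failure (the input size is arbitrary):

```python
import zstandard as zstd

data = b"x" * 1000000

# No content size in the frame header, so decoding must stream.
frame = zstd.ZstdCompressor(write_content_size=False).compress(data)

dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)
try:
    dctx.decompress(frame, max_output_size=len(data))
except zstd.ZstdError as e:
    print(e)  # decompression error: Frame requires too much memory
```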
 
 
 @make_cffi
-class TestDecompressor_copy_stream(unittest.TestCase):
+class TestDecompressor_copy_stream(TestCase):
     def test_no_read(self):
         source = object()
         dest = io.BytesIO()
@@ -256,12 +270,12 @@
 
         self.assertEqual(r, 0)
         self.assertEqual(w, 0)
-        self.assertEqual(dest.getvalue(), b'')
+        self.assertEqual(dest.getvalue(), b"")
 
     def test_large_data(self):
         source = io.BytesIO()
         for i in range(255):
-            source.write(struct.Struct('>B').pack(i) * 16384)
+            source.write(struct.Struct(">B").pack(i) * 16384)
         source.seek(0)
 
         compressed = io.BytesIO()
@@ -277,33 +291,32 @@
         self.assertEqual(w, len(source.getvalue()))
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(
-            b'foobarfoobar'))
+        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
 
         dest = OpCountingBytesIO()
         dctx = zstd.ZstdDecompressor()
         r, w = dctx.copy_stream(source, dest, read_size=1, write_size=1)
 
         self.assertEqual(r, len(source.getvalue()))
-        self.assertEqual(w, len(b'foobarfoobar'))
+        self.assertEqual(w, len(b"foobarfoobar"))
         self.assertEqual(source._read_count, len(source.getvalue()) + 1)
         self.assertEqual(dest._write_count, len(dest.getvalue()))
 
 
 @make_cffi
-class TestDecompressor_stream_reader(unittest.TestCase):
+class TestDecompressor_stream_reader(TestCase):
     def test_context_manager(self):
         dctx = zstd.ZstdDecompressor()
 
-        with dctx.stream_reader(b'foo') as reader:
-            with self.assertRaisesRegexp(ValueError, 'cannot __enter__ multiple times'):
+        with dctx.stream_reader(b"foo") as reader:
+            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
                 with reader as reader2:
                     pass
 
     def test_not_implemented(self):
         dctx = zstd.ZstdDecompressor()
 
-        with dctx.stream_reader(b'foo') as reader:
+        with dctx.stream_reader(b"foo") as reader:
             with self.assertRaises(io.UnsupportedOperation):
                 reader.readline()
 
@@ -317,7 +330,7 @@
                 next(reader)
 
             with self.assertRaises(io.UnsupportedOperation):
-                reader.write(b'foo')
+                reader.write(b"foo")
 
             with self.assertRaises(io.UnsupportedOperation):
                 reader.writelines([])
@@ -325,7 +338,7 @@
     def test_constant_methods(self):
         dctx = zstd.ZstdDecompressor()
 
-        with dctx.stream_reader(b'foo') as reader:
+        with dctx.stream_reader(b"foo") as reader:
             self.assertFalse(reader.closed)
             self.assertTrue(reader.readable())
             self.assertFalse(reader.writable())
@@ -340,29 +353,31 @@
     def test_read_closed(self):
         dctx = zstd.ZstdDecompressor()
 
-        with dctx.stream_reader(b'foo') as reader:
+        with dctx.stream_reader(b"foo") as reader:
             reader.close()
             self.assertTrue(reader.closed)
-            with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+            with self.assertRaisesRegex(ValueError, "stream is closed"):
                 reader.read(1)
 
     def test_read_sizes(self):
         cctx = zstd.ZstdCompressor()
-        foo = cctx.compress(b'foo')
+        foo = cctx.compress(b"foo")
 
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(foo) as reader:
-            with self.assertRaisesRegexp(ValueError, 'cannot read negative amounts less than -1'):
+            with self.assertRaisesRegex(
+                ValueError, "cannot read negative amounts less than -1"
+            ):
                 reader.read(-2)
 
-            self.assertEqual(reader.read(0), b'')
-            self.assertEqual(reader.read(), b'foo')
+            self.assertEqual(reader.read(0), b"")
+            self.assertEqual(reader.read(), b"foo")
 
     def test_read_buffer(self):
         cctx = zstd.ZstdCompressor()
 
-        source = b''.join([b'foo' * 60, b'bar' * 60, b'baz' * 60])
+        source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
         frame = cctx.compress(source)
 
         dctx = zstd.ZstdDecompressor()
@@ -376,14 +391,14 @@
             self.assertEqual(reader.tell(), len(source))
 
             # Read after EOF should return empty bytes.
-            self.assertEqual(reader.read(1), b'')
+            self.assertEqual(reader.read(1), b"")
             self.assertEqual(reader.tell(), len(result))
 
         self.assertTrue(reader.closed)
 
     def test_read_buffer_small_chunks(self):
         cctx = zstd.ZstdCompressor()
-        source = b''.join([b'foo' * 60, b'bar' * 60, b'baz' * 60])
+        source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
         frame = cctx.compress(source)
 
         dctx = zstd.ZstdDecompressor()
@@ -398,11 +413,11 @@
                 chunks.append(chunk)
                 self.assertEqual(reader.tell(), sum(map(len, chunks)))
 
-        self.assertEqual(b''.join(chunks), source)
+        self.assertEqual(b"".join(chunks), source)
 
     def test_read_stream(self):
         cctx = zstd.ZstdCompressor()
-        source = b''.join([b'foo' * 60, b'bar' * 60, b'baz' * 60])
+        source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
         frame = cctx.compress(source)
 
         dctx = zstd.ZstdDecompressor()
@@ -412,7 +427,7 @@
             chunk = reader.read(8192)
             self.assertEqual(chunk, source)
             self.assertEqual(reader.tell(), len(source))
-            self.assertEqual(reader.read(1), b'')
+            self.assertEqual(reader.read(1), b"")
             self.assertEqual(reader.tell(), len(source))
             self.assertFalse(reader.closed)
 
@@ -420,7 +435,7 @@
 
     def test_read_stream_small_chunks(self):
         cctx = zstd.ZstdCompressor()
-        source = b''.join([b'foo' * 60, b'bar' * 60, b'baz' * 60])
+        source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
         frame = cctx.compress(source)
 
         dctx = zstd.ZstdDecompressor()
@@ -435,11 +450,11 @@
                 chunks.append(chunk)
                 self.assertEqual(reader.tell(), sum(map(len, chunks)))
 
-        self.assertEqual(b''.join(chunks), source)
+        self.assertEqual(b"".join(chunks), source)
 
     def test_read_after_exit(self):
         cctx = zstd.ZstdCompressor()
-        frame = cctx.compress(b'foo' * 60)
+        frame = cctx.compress(b"foo" * 60)
 
         dctx = zstd.ZstdDecompressor()
 
@@ -449,45 +464,46 @@
 
         self.assertTrue(reader.closed)
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
             reader.read(10)
 
     def test_illegal_seeks(self):
         cctx = zstd.ZstdCompressor()
-        frame = cctx.compress(b'foo' * 60)
+        frame = cctx.compress(b"foo" * 60)
 
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(frame) as reader:
-            with self.assertRaisesRegexp(ValueError,
-                                         'cannot seek to negative position'):
+            with self.assertRaisesRegex(ValueError, "cannot seek to negative position"):
                 reader.seek(-1, os.SEEK_SET)
 
             reader.read(1)
 
-            with self.assertRaisesRegexp(
-                ValueError, 'cannot seek zstd decompression stream backwards'):
+            with self.assertRaisesRegex(
+                ValueError, "cannot seek zstd decompression stream backwards"
+            ):
                 reader.seek(0, os.SEEK_SET)
 
-            with self.assertRaisesRegexp(
-                ValueError, 'cannot seek zstd decompression stream backwards'):
+            with self.assertRaisesRegex(
+                ValueError, "cannot seek zstd decompression stream backwards"
+            ):
                 reader.seek(-1, os.SEEK_CUR)
 
-            with self.assertRaisesRegexp(
-                ValueError,
-                'zstd decompression streams cannot be seeked with SEEK_END'):
+            with self.assertRaisesRegex(
+                ValueError, "zstd decompression streams cannot be seeked with SEEK_END"
+            ):
                 reader.seek(0, os.SEEK_END)
 
             reader.close()
 
-            with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+            with self.assertRaisesRegex(ValueError, "stream is closed"):
                 reader.seek(4, os.SEEK_SET)
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
             reader.seek(0)
 
     def test_seek(self):
-        source = b'foobar' * 60
+        source = b"foobar" * 60
         cctx = zstd.ZstdCompressor()
         frame = cctx.compress(source)
 
@@ -495,32 +511,32 @@
 
         with dctx.stream_reader(frame) as reader:
             reader.seek(3)
-            self.assertEqual(reader.read(3), b'bar')
+            self.assertEqual(reader.read(3), b"bar")
 
             reader.seek(4, os.SEEK_CUR)
-            self.assertEqual(reader.read(2), b'ar')
+            self.assertEqual(reader.read(2), b"ar")
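Seeking on a decompression stream is forward-only: seek() is implemented by decompressing and discarding output, which is why the backward and SEEK_END cases above raise. A minimal sketch:

```python
import os

import zstandard as zstd

frame = zstd.ZstdCompressor().compress(b"foobar" * 60)

reader = zstd.ZstdDecompressor().stream_reader(frame)
reader.seek(3)               # decompresses and discards b"foo"
print(reader.read(3))        # b"bar"
reader.seek(4, os.SEEK_CUR)  # skips b"foob"
print(reader.read(2))        # b"ar"
```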
 
     def test_no_context_manager(self):
-        source = b'foobar' * 60
+        source = b"foobar" * 60
         cctx = zstd.ZstdCompressor()
         frame = cctx.compress(source)
 
         dctx = zstd.ZstdDecompressor()
         reader = dctx.stream_reader(frame)
 
-        self.assertEqual(reader.read(6), b'foobar')
-        self.assertEqual(reader.read(18), b'foobar' * 3)
+        self.assertEqual(reader.read(6), b"foobar")
+        self.assertEqual(reader.read(18), b"foobar" * 3)
         self.assertFalse(reader.closed)
 
         # Calling close prevents subsequent use.
         reader.close()
         self.assertTrue(reader.closed)
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
             reader.read(6)
 
     def test_read_after_error(self):
-        source = io.BytesIO(b'')
+        source = io.BytesIO(b"")
         dctx = zstd.ZstdDecompressor()
 
         reader = dctx.stream_reader(source)
@@ -529,7 +545,7 @@
             reader.read(0)
 
         with reader:
-            with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+            with self.assertRaisesRegex(ValueError, "stream is closed"):
                 reader.read(100)
 
     def test_partial_read(self):
@@ -553,87 +569,87 @@
         cctx = zstd.ZstdCompressor()
         source = io.BytesIO()
         writer = cctx.stream_writer(source)
-        writer.write(b'foo')
+        writer.write(b"foo")
         writer.flush(zstd.FLUSH_FRAME)
-        writer.write(b'bar')
+        writer.write(b"bar")
         writer.flush(zstd.FLUSH_FRAME)
 
         dctx = zstd.ZstdDecompressor()
 
         reader = dctx.stream_reader(source.getvalue())
-        self.assertEqual(reader.read(2), b'fo')
-        self.assertEqual(reader.read(2), b'o')
-        self.assertEqual(reader.read(2), b'ba')
-        self.assertEqual(reader.read(2), b'r')
+        self.assertEqual(reader.read(2), b"fo")
+        self.assertEqual(reader.read(2), b"o")
+        self.assertEqual(reader.read(2), b"ba")
+        self.assertEqual(reader.read(2), b"r")
 
         source.seek(0)
         reader = dctx.stream_reader(source)
-        self.assertEqual(reader.read(2), b'fo')
-        self.assertEqual(reader.read(2), b'o')
-        self.assertEqual(reader.read(2), b'ba')
-        self.assertEqual(reader.read(2), b'r')
+        self.assertEqual(reader.read(2), b"fo")
+        self.assertEqual(reader.read(2), b"o")
+        self.assertEqual(reader.read(2), b"ba")
+        self.assertEqual(reader.read(2), b"r")
 
         reader = dctx.stream_reader(source.getvalue())
-        self.assertEqual(reader.read(3), b'foo')
-        self.assertEqual(reader.read(3), b'bar')
+        self.assertEqual(reader.read(3), b"foo")
+        self.assertEqual(reader.read(3), b"bar")
 
         source.seek(0)
         reader = dctx.stream_reader(source)
-        self.assertEqual(reader.read(3), b'foo')
-        self.assertEqual(reader.read(3), b'bar')
+        self.assertEqual(reader.read(3), b"foo")
+        self.assertEqual(reader.read(3), b"bar")
 
         reader = dctx.stream_reader(source.getvalue())
-        self.assertEqual(reader.read(4), b'foo')
-        self.assertEqual(reader.read(4), b'bar')
+        self.assertEqual(reader.read(4), b"foo")
+        self.assertEqual(reader.read(4), b"bar")
 
         source.seek(0)
         reader = dctx.stream_reader(source)
-        self.assertEqual(reader.read(4), b'foo')
-        self.assertEqual(reader.read(4), b'bar')
+        self.assertEqual(reader.read(4), b"foo")
+        self.assertEqual(reader.read(4), b"bar")
 
         reader = dctx.stream_reader(source.getvalue())
-        self.assertEqual(reader.read(128), b'foo')
-        self.assertEqual(reader.read(128), b'bar')
+        self.assertEqual(reader.read(128), b"foo")
+        self.assertEqual(reader.read(128), b"bar")
 
         source.seek(0)
         reader = dctx.stream_reader(source)
-        self.assertEqual(reader.read(128), b'foo')
-        self.assertEqual(reader.read(128), b'bar')
+        self.assertEqual(reader.read(128), b"foo")
+        self.assertEqual(reader.read(128), b"bar")
 
         # Now tests for reads spanning frames.
         reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
-        self.assertEqual(reader.read(3), b'foo')
-        self.assertEqual(reader.read(3), b'bar')
+        self.assertEqual(reader.read(3), b"foo")
+        self.assertEqual(reader.read(3), b"bar")
 
         source.seek(0)
         reader = dctx.stream_reader(source, read_across_frames=True)
-        self.assertEqual(reader.read(3), b'foo')
-        self.assertEqual(reader.read(3), b'bar')
+        self.assertEqual(reader.read(3), b"foo")
+        self.assertEqual(reader.read(3), b"bar")
 
         reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
-        self.assertEqual(reader.read(6), b'foobar')
+        self.assertEqual(reader.read(6), b"foobar")
 
         source.seek(0)
         reader = dctx.stream_reader(source, read_across_frames=True)
-        self.assertEqual(reader.read(6), b'foobar')
+        self.assertEqual(reader.read(6), b"foobar")
 
         reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
-        self.assertEqual(reader.read(7), b'foobar')
+        self.assertEqual(reader.read(7), b"foobar")
 
         source.seek(0)
         reader = dctx.stream_reader(source, read_across_frames=True)
-        self.assertEqual(reader.read(7), b'foobar')
+        self.assertEqual(reader.read(7), b"foobar")
 
         reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
-        self.assertEqual(reader.read(128), b'foobar')
+        self.assertEqual(reader.read(128), b"foobar")
 
         source.seek(0)
         reader = dctx.stream_reader(source, read_across_frames=True)
-        self.assertEqual(reader.read(128), b'foobar')
+        self.assertEqual(reader.read(128), b"foobar")
 
     def test_readinto(self):
         cctx = zstd.ZstdCompressor()
-        foo = cctx.compress(b'foo')
+        foo = cctx.compress(b"foo")
 
         dctx = zstd.ZstdDecompressor()
 
@@ -641,116 +657,116 @@
         # The exact exception varies based on the backend.
         reader = dctx.stream_reader(foo)
         with self.assertRaises(Exception):
-            reader.readinto(b'foobar')
+            reader.readinto(b"foobar")
 
         # readinto() with sufficiently large destination.
         b = bytearray(1024)
         reader = dctx.stream_reader(foo)
         self.assertEqual(reader.readinto(b), 3)
-        self.assertEqual(b[0:3], b'foo')
+        self.assertEqual(b[0:3], b"foo")
         self.assertEqual(reader.readinto(b), 0)
-        self.assertEqual(b[0:3], b'foo')
+        self.assertEqual(b[0:3], b"foo")
 
         # readinto() with small reads.
         b = bytearray(1024)
         reader = dctx.stream_reader(foo, read_size=1)
         self.assertEqual(reader.readinto(b), 3)
-        self.assertEqual(b[0:3], b'foo')
+        self.assertEqual(b[0:3], b"foo")
 
         # Too small destination buffer.
         b = bytearray(2)
         reader = dctx.stream_reader(foo)
         self.assertEqual(reader.readinto(b), 2)
-        self.assertEqual(b[:], b'fo')
+        self.assertEqual(b[:], b"fo")
 
     def test_readinto1(self):
         cctx = zstd.ZstdCompressor()
-        foo = cctx.compress(b'foo')
+        foo = cctx.compress(b"foo")
 
         dctx = zstd.ZstdDecompressor()
 
         reader = dctx.stream_reader(foo)
         with self.assertRaises(Exception):
-            reader.readinto1(b'foobar')
+            reader.readinto1(b"foobar")
 
         # Sufficiently large destination.
         b = bytearray(1024)
         reader = dctx.stream_reader(foo)
         self.assertEqual(reader.readinto1(b), 3)
-        self.assertEqual(b[0:3], b'foo')
+        self.assertEqual(b[0:3], b"foo")
         self.assertEqual(reader.readinto1(b), 0)
-        self.assertEqual(b[0:3], b'foo')
+        self.assertEqual(b[0:3], b"foo")
 
         # readinto() with small reads.
         b = bytearray(1024)
         reader = dctx.stream_reader(foo, read_size=1)
         self.assertEqual(reader.readinto1(b), 3)
-        self.assertEqual(b[0:3], b'foo')
+        self.assertEqual(b[0:3], b"foo")
 
         # Too small destination buffer.
         b = bytearray(2)
         reader = dctx.stream_reader(foo)
         self.assertEqual(reader.readinto1(b), 2)
-        self.assertEqual(b[:], b'fo')
+        self.assertEqual(b[:], b"fo")
 
     def test_readall(self):
         cctx = zstd.ZstdCompressor()
-        foo = cctx.compress(b'foo')
+        foo = cctx.compress(b"foo")
 
         dctx = zstd.ZstdDecompressor()
         reader = dctx.stream_reader(foo)
 
-        self.assertEqual(reader.readall(), b'foo')
+        self.assertEqual(reader.readall(), b"foo")
 
     def test_read1(self):
         cctx = zstd.ZstdCompressor()
-        foo = cctx.compress(b'foo')
+        foo = cctx.compress(b"foo")
 
         dctx = zstd.ZstdDecompressor()
 
         b = OpCountingBytesIO(foo)
         reader = dctx.stream_reader(b)
 
-        self.assertEqual(reader.read1(), b'foo')
+        self.assertEqual(reader.read1(), b"foo")
         self.assertEqual(b._read_count, 1)
 
         b = OpCountingBytesIO(foo)
         reader = dctx.stream_reader(b)
 
-        self.assertEqual(reader.read1(0), b'')
-        self.assertEqual(reader.read1(2), b'fo')
+        self.assertEqual(reader.read1(0), b"")
+        self.assertEqual(reader.read1(2), b"fo")
         self.assertEqual(b._read_count, 1)
-        self.assertEqual(reader.read1(1), b'o')
+        self.assertEqual(reader.read1(1), b"o")
         self.assertEqual(b._read_count, 1)
-        self.assertEqual(reader.read1(1), b'')
+        self.assertEqual(reader.read1(1), b"")
         self.assertEqual(b._read_count, 2)
 
     def test_read_lines(self):
         cctx = zstd.ZstdCompressor()
-        source = b'\n'.join(('line %d' % i).encode('ascii') for i in range(1024))
+        source = b"\n".join(("line %d" % i).encode("ascii") for i in range(1024))
 
         frame = cctx.compress(source)
 
         dctx = zstd.ZstdDecompressor()
         reader = dctx.stream_reader(frame)
-        tr = io.TextIOWrapper(reader, encoding='utf-8')
+        tr = io.TextIOWrapper(reader, encoding="utf-8")
 
         lines = []
         for line in tr:
-            lines.append(line.encode('utf-8'))
+            lines.append(line.encode("utf-8"))
 
         self.assertEqual(len(lines), 1024)
-        self.assertEqual(b''.join(lines), source)
+        self.assertEqual(b"".join(lines), source)
 
         reader = dctx.stream_reader(frame)
-        tr = io.TextIOWrapper(reader, encoding='utf-8')
+        tr = io.TextIOWrapper(reader, encoding="utf-8")
 
         lines = tr.readlines()
         self.assertEqual(len(lines), 1024)
-        self.assertEqual(''.join(lines).encode('utf-8'), source)
+        self.assertEqual("".join(lines).encode("utf-8"), source)
 
         reader = dctx.stream_reader(frame)
-        tr = io.TextIOWrapper(reader, encoding='utf-8')
+        tr = io.TextIOWrapper(reader, encoding="utf-8")
 
         lines = []
         while True:
@@ -758,26 +774,26 @@
             if not line:
                 break
 
-            lines.append(line.encode('utf-8'))
+            lines.append(line.encode("utf-8"))
 
         self.assertEqual(len(lines), 1024)
-        self.assertEqual(b''.join(lines), source)
+        self.assertEqual(b"".join(lines), source)
 
 
 @make_cffi
-class TestDecompressor_decompressobj(unittest.TestCase):
+class TestDecompressor_decompressobj(TestCase):
     def test_simple(self):
-        data = zstd.ZstdCompressor(level=1).compress(b'foobar')
+        data = zstd.ZstdCompressor(level=1).compress(b"foobar")
 
         dctx = zstd.ZstdDecompressor()
         dobj = dctx.decompressobj()
-        self.assertEqual(dobj.decompress(data), b'foobar')
+        self.assertEqual(dobj.decompress(data), b"foobar")
         self.assertIsNone(dobj.flush())
         self.assertIsNone(dobj.flush(10))
         self.assertIsNone(dobj.flush(length=100))
 
     def test_input_types(self):
-        compressed = zstd.ZstdCompressor(level=1).compress(b'foo')
+        compressed = zstd.ZstdCompressor(level=1).compress(b"foo")
 
         dctx = zstd.ZstdDecompressor()
 
@@ -795,28 +811,28 @@
             self.assertIsNone(dobj.flush())
             self.assertIsNone(dobj.flush(10))
             self.assertIsNone(dobj.flush(length=100))
-            self.assertEqual(dobj.decompress(source), b'foo')
+            self.assertEqual(dobj.decompress(source), b"foo")
             self.assertIsNone(dobj.flush())
 
     def test_reuse(self):
-        data = zstd.ZstdCompressor(level=1).compress(b'foobar')
+        data = zstd.ZstdCompressor(level=1).compress(b"foobar")
 
         dctx = zstd.ZstdDecompressor()
         dobj = dctx.decompressobj()
         dobj.decompress(data)
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'cannot use a decompressobj'):
+        with self.assertRaisesRegex(zstd.ZstdError, "cannot use a decompressobj"):
             dobj.decompress(data)
             self.assertIsNone(dobj.flush())
 
     def test_bad_write_size(self):
         dctx = zstd.ZstdDecompressor()
 
-        with self.assertRaisesRegexp(ValueError, 'write_size must be positive'):
+        with self.assertRaisesRegex(ValueError, "write_size must be positive"):
             dctx.decompressobj(write_size=0)
 
     def test_write_size(self):
-        source = b'foo' * 64 + b'bar' * 128
+        source = b"foo" * 64 + b"bar" * 128
         data = zstd.ZstdCompressor(level=1).compress(source)
 
         dctx = zstd.ZstdDecompressor()
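
The decompressobj API exercised in this class mirrors zlib's interface:
decompress() returns output as it becomes available, flush() exists only for
API compatibility and always returns None, and an object cannot be reused
once its frame completes. A minimal sketch (illustrative, not from the patch):

import zstandard as zstd

data = zstd.ZstdCompressor(level=1).compress(b"foobar")
dobj = zstd.ZstdDecompressor().decompressobj()
assert dobj.decompress(data) == b"foobar"
assert dobj.flush() is None   # no-op, kept for zlib API compatibility
# Feeding more data now raises ZstdError ("cannot use a decompressobj ...").
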
@@ -836,7 +852,7 @@
 
 
 @make_cffi
-class TestDecompressor_stream_writer(unittest.TestCase):
+class TestDecompressor_stream_writer(TestCase):
     def test_io_api(self):
         buffer = io.BytesIO()
         dctx = zstd.ZstdDecompressor()
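
For orientation, the stream_writer under test here wraps a destination file
object and writes decompressed bytes into it as compressed input is written.
A hedged sketch of the basic flow (the buffer name is illustrative):

import io
import zstandard as zstd

compressed = zstd.ZstdCompressor(level=1).compress(b"foo")
out = io.BytesIO()
dctx = zstd.ZstdDecompressor()
with dctx.stream_writer(out) as writer:
    # write() returns the number of bytes emitted to `out` (3 here);
    # pass write_return_read=True to get bytes consumed instead.
    writer.write(compressed)
    assert out.getvalue() == b"foo"
# Exiting the context closes the writer and, by default, `out` as well.
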
@@ -908,14 +924,14 @@
             writer.fileno()
 
     def test_fileno_file(self):
-        with tempfile.TemporaryFile('wb') as tf:
+        with tempfile.TemporaryFile("wb") as tf:
             dctx = zstd.ZstdDecompressor()
             writer = dctx.stream_writer(tf)
 
             self.assertEqual(writer.fileno(), tf.fileno())
 
     def test_close(self):
-        foo = zstd.ZstdCompressor().compress(b'foo')
+        foo = zstd.ZstdCompressor().compress(b"foo")
 
         buffer = NonClosingBytesIO()
         dctx = zstd.ZstdDecompressor()
@@ -928,17 +944,17 @@
         self.assertTrue(writer.closed)
         self.assertTrue(buffer.closed)
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
-            writer.write(b'')
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
+            writer.write(b"")
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
             writer.flush()
 
-        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+        with self.assertRaisesRegex(ValueError, "stream is closed"):
             with writer:
                 pass
 
-        self.assertEqual(buffer.getvalue(), b'foo')
+        self.assertEqual(buffer.getvalue(), b"foo")
 
         # Context manager exit should close stream.
         buffer = NonClosingBytesIO()
@@ -948,7 +964,7 @@
             writer.write(foo)
 
         self.assertTrue(writer.closed)
-        self.assertEqual(buffer.getvalue(), b'foo')
+        self.assertEqual(buffer.getvalue(), b"foo")
 
     def test_flush(self):
         buffer = OpCountingBytesIO()
@@ -962,12 +978,12 @@
 
     def test_empty_roundtrip(self):
         cctx = zstd.ZstdCompressor()
-        empty = cctx.compress(b'')
-        self.assertEqual(decompress_via_writer(empty), b'')
+        empty = cctx.compress(b"")
+        self.assertEqual(decompress_via_writer(empty), b"")
 
     def test_input_types(self):
         cctx = zstd.ZstdCompressor(level=1)
-        compressed = cctx.compress(b'foo')
+        compressed = cctx.compress(b"foo")
 
         mutable_array = bytearray(len(compressed))
         mutable_array[:] = compressed
@@ -984,25 +1000,25 @@
 
             decompressor = dctx.stream_writer(buffer)
             decompressor.write(source)
-            self.assertEqual(buffer.getvalue(), b'foo')
+            self.assertEqual(buffer.getvalue(), b"foo")
 
             buffer = NonClosingBytesIO()
 
             with dctx.stream_writer(buffer) as decompressor:
                 self.assertEqual(decompressor.write(source), 3)
 
-            self.assertEqual(buffer.getvalue(), b'foo')
+            self.assertEqual(buffer.getvalue(), b"foo")
 
             buffer = io.BytesIO()
             writer = dctx.stream_writer(buffer, write_return_read=True)
             self.assertEqual(writer.write(source), len(source))
-            self.assertEqual(buffer.getvalue(), b'foo')
+            self.assertEqual(buffer.getvalue(), b"foo")
 
     def test_large_roundtrip(self):
         chunks = []
         for i in range(255):
-            chunks.append(struct.Struct('>B').pack(i) * 16384)
-        orig = b''.join(chunks)
+            chunks.append(struct.Struct(">B").pack(i) * 16384)
+        orig = b"".join(chunks)
         cctx = zstd.ZstdCompressor()
         compressed = cctx.compress(orig)
 
@@ -1012,9 +1028,9 @@
         chunks = []
         for i in range(255):
             for j in range(255):
-                chunks.append(struct.Struct('>B').pack(j) * i)
+                chunks.append(struct.Struct(">B").pack(j) * i)
 
-        orig = b''.join(chunks)
+        orig = b"".join(chunks)
         cctx = zstd.ZstdCompressor()
         compressed = cctx.compress(orig)
 
@@ -1042,13 +1058,13 @@
     def test_dictionary(self):
         samples = []
         for i in range(128):
-            samples.append(b'foo' * 64)
-            samples.append(b'bar' * 64)
-            samples.append(b'foobar' * 64)
+            samples.append(b"foo" * 64)
+            samples.append(b"bar" * 64)
+            samples.append(b"foobar" * 64)
 
         d = zstd.train_dictionary(8192, samples)
 
-        orig = b'foobar' * 16384
+        orig = b"foobar" * 16384
         buffer = NonClosingBytesIO()
         cctx = zstd.ZstdCompressor(dict_data=d)
         with cctx.stream_writer(buffer) as compressor:
@@ -1083,22 +1099,22 @@
         self.assertGreater(size, 100000)
 
     def test_write_size(self):
-        source = zstd.ZstdCompressor().compress(b'foobarfoobar')
+        source = zstd.ZstdCompressor().compress(b"foobarfoobar")
         dest = OpCountingBytesIO()
         dctx = zstd.ZstdDecompressor()
         with dctx.stream_writer(dest, write_size=1) as decompressor:
-            s = struct.Struct('>B')
+            s = struct.Struct(">B")
             for c in source:
                 if not isinstance(c, str):
                     c = s.pack(c)
                 decompressor.write(c)
 
-        self.assertEqual(dest.getvalue(), b'foobarfoobar')
+        self.assertEqual(dest.getvalue(), b"foobarfoobar")
         self.assertEqual(dest._write_count, len(dest.getvalue()))
 
 
 @make_cffi
-class TestDecompressor_read_to_iter(unittest.TestCase):
+class TestDecompressor_read_to_iter(TestCase):
     def test_type_validation(self):
         dctx = zstd.ZstdDecompressor()
 
@@ -1106,10 +1122,10 @@
         dctx.read_to_iter(io.BytesIO())
 
         # Buffer protocol works.
-        dctx.read_to_iter(b'foobar')
+        dctx.read_to_iter(b"foobar")
 
-        with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'):
-            b''.join(dctx.read_to_iter(True))
+        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+            b"".join(dctx.read_to_iter(True))
 
     def test_empty_input(self):
         dctx = zstd.ZstdDecompressor()
@@ -1120,25 +1136,25 @@
         with self.assertRaises(StopIteration):
             next(it)
 
-        it = dctx.read_to_iter(b'')
+        it = dctx.read_to_iter(b"")
         with self.assertRaises(StopIteration):
             next(it)
 
     def test_invalid_input(self):
         dctx = zstd.ZstdDecompressor()
 
-        source = io.BytesIO(b'foobar')
+        source = io.BytesIO(b"foobar")
         it = dctx.read_to_iter(source)
-        with self.assertRaisesRegexp(zstd.ZstdError, 'Unknown frame descriptor'):
+        with self.assertRaisesRegex(zstd.ZstdError, "Unknown frame descriptor"):
             next(it)
 
-        it = dctx.read_to_iter(b'foobar')
-        with self.assertRaisesRegexp(zstd.ZstdError, 'Unknown frame descriptor'):
+        it = dctx.read_to_iter(b"foobar")
+        with self.assertRaisesRegex(zstd.ZstdError, "Unknown frame descriptor"):
             next(it)
 
     def test_empty_roundtrip(self):
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        empty = cctx.compress(b'')
+        empty = cctx.compress(b"")
 
         source = io.BytesIO(empty)
         source.seek(0)
@@ -1157,24 +1173,28 @@
     def test_skip_bytes_too_large(self):
         dctx = zstd.ZstdDecompressor()
 
-        with self.assertRaisesRegexp(ValueError, 'skip_bytes must be smaller than read_size'):
-            b''.join(dctx.read_to_iter(b'', skip_bytes=1, read_size=1))
+        with self.assertRaisesRegex(
+            ValueError, "skip_bytes must be smaller than read_size"
+        ):
+            b"".join(dctx.read_to_iter(b"", skip_bytes=1, read_size=1))
 
-        with self.assertRaisesRegexp(ValueError, 'skip_bytes larger than first input chunk'):
-            b''.join(dctx.read_to_iter(b'foobar', skip_bytes=10))
+        with self.assertRaisesRegex(
+            ValueError, "skip_bytes larger than first input chunk"
+        ):
+            b"".join(dctx.read_to_iter(b"foobar", skip_bytes=10))
 
     def test_skip_bytes(self):
         cctx = zstd.ZstdCompressor(write_content_size=False)
-        compressed = cctx.compress(b'foobar')
+        compressed = cctx.compress(b"foobar")
 
         dctx = zstd.ZstdDecompressor()
-        output = b''.join(dctx.read_to_iter(b'hdr' + compressed, skip_bytes=3))
-        self.assertEqual(output, b'foobar')
+        output = b"".join(dctx.read_to_iter(b"hdr" + compressed, skip_bytes=3))
+        self.assertEqual(output, b"foobar")
 
     def test_large_output(self):
         source = io.BytesIO()
-        source.write(b'f' * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
-        source.write(b'o')
+        source.write(b"f" * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        source.write(b"o")
         source.seek(0)
 
         cctx = zstd.ZstdCompressor(level=1)
@@ -1191,7 +1211,7 @@
         with self.assertRaises(StopIteration):
             next(it)
 
-        decompressed = b''.join(chunks)
+        decompressed = b"".join(chunks)
         self.assertEqual(decompressed, source.getvalue())
 
         # And again with buffer protocol.
@@ -1203,12 +1223,12 @@
         with self.assertRaises(StopIteration):
             next(it)
 
-        decompressed = b''.join(chunks)
+        decompressed = b"".join(chunks)
         self.assertEqual(decompressed, source.getvalue())
 
-    @unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+    @unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
     def test_large_input(self):
-        bytes = list(struct.Struct('>B').pack(i) for i in range(256))
+        bytes = list(struct.Struct(">B").pack(i) for i in range(256))
         compressed = NonClosingBytesIO()
         input_size = 0
         cctx = zstd.ZstdCompressor(level=1)
@@ -1217,14 +1237,18 @@
                 compressor.write(random.choice(bytes))
                 input_size += 1
 
-                have_compressed = len(compressed.getvalue()) > zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+                have_compressed = (
+                    len(compressed.getvalue())
+                    > zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+                )
                 have_raw = input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
                 if have_compressed and have_raw:
                     break
 
         compressed = io.BytesIO(compressed.getvalue())
-        self.assertGreater(len(compressed.getvalue()),
-                           zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE)
+        self.assertGreater(
+            len(compressed.getvalue()), zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+        )
 
         dctx = zstd.ZstdDecompressor()
         it = dctx.read_to_iter(compressed)
@@ -1237,7 +1261,7 @@
         with self.assertRaises(StopIteration):
             next(it)
 
-        decompressed = b''.join(chunks)
+        decompressed = b"".join(chunks)
         self.assertEqual(len(decompressed), input_size)
 
         # And again with buffer protocol.
@@ -1251,7 +1275,7 @@
         with self.assertRaises(StopIteration):
             next(it)
 
-        decompressed = b''.join(chunks)
+        decompressed = b"".join(chunks)
         self.assertEqual(len(decompressed), input_size)
 
     def test_interesting(self):
@@ -1263,22 +1287,23 @@
         compressed = NonClosingBytesIO()
         with cctx.stream_writer(compressed) as compressor:
             for i in range(256):
-                chunk = b'\0' * 1024
+                chunk = b"\0" * 1024
                 compressor.write(chunk)
                 source.write(chunk)
 
         dctx = zstd.ZstdDecompressor()
 
-        simple = dctx.decompress(compressed.getvalue(),
-                                 max_output_size=len(source.getvalue()))
+        simple = dctx.decompress(
+            compressed.getvalue(), max_output_size=len(source.getvalue())
+        )
         self.assertEqual(simple, source.getvalue())
 
         compressed = io.BytesIO(compressed.getvalue())
-        streamed = b''.join(dctx.read_to_iter(compressed))
+        streamed = b"".join(dctx.read_to_iter(compressed))
         self.assertEqual(streamed, source.getvalue())
 
     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b'foobarfoobar'))
+        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
         dctx = zstd.ZstdDecompressor()
         for chunk in dctx.read_to_iter(source, read_size=1, write_size=1):
             self.assertEqual(len(chunk), 1)
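
read_to_iter, covered above, is the generator-style API: it accepts a
file-like object or any buffer-protocol object and yields decompressed
chunks, with read_size and write_size bounding how much is read per
iteration and how large each yielded chunk may be. A short sketch:

import io
import zstandard as zstd

frame = zstd.ZstdCompressor(level=1).compress(b"foobar")
dctx = zstd.ZstdDecompressor()
assert b"".join(dctx.read_to_iter(io.BytesIO(frame))) == b"foobar"

# With write_size=1 every yielded chunk is a single byte, as the test
# above asserts.
for chunk in dctx.read_to_iter(frame, read_size=1, write_size=1):
    assert len(chunk) == 1
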
@@ -1287,97 +1312,110 @@
 
     def test_magic_less(self):
         params = zstd.CompressionParameters.from_level(
-            1, format=zstd.FORMAT_ZSTD1_MAGICLESS)
+            1, format=zstd.FORMAT_ZSTD1_MAGICLESS
+        )
         cctx = zstd.ZstdCompressor(compression_params=params)
-        frame = cctx.compress(b'foobar')
+        frame = cctx.compress(b"foobar")
 
-        self.assertNotEqual(frame[0:4], b'\x28\xb5\x2f\xfd')
+        self.assertNotEqual(frame[0:4], b"\x28\xb5\x2f\xfd")
 
         dctx = zstd.ZstdDecompressor()
-        with self.assertRaisesRegexp(
-            zstd.ZstdError, 'error determining content size from frame header'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "error determining content size from frame header"
+        ):
             dctx.decompress(frame)
 
         dctx = zstd.ZstdDecompressor(format=zstd.FORMAT_ZSTD1_MAGICLESS)
-        res = b''.join(dctx.read_to_iter(frame))
-        self.assertEqual(res, b'foobar')
+        res = b"".join(dctx.read_to_iter(frame))
+        self.assertEqual(res, b"foobar")
 
 
 @make_cffi
-class TestDecompressor_content_dict_chain(unittest.TestCase):
+class TestDecompressor_content_dict_chain(TestCase):
     def test_bad_inputs_simple(self):
         dctx = zstd.ZstdDecompressor()
 
         with self.assertRaises(TypeError):
-            dctx.decompress_content_dict_chain(b'foo')
+            dctx.decompress_content_dict_chain(b"foo")
 
         with self.assertRaises(TypeError):
-            dctx.decompress_content_dict_chain((b'foo', b'bar'))
+            dctx.decompress_content_dict_chain((b"foo", b"bar"))
 
-        with self.assertRaisesRegexp(ValueError, 'empty input chain'):
+        with self.assertRaisesRegex(ValueError, "empty input chain"):
             dctx.decompress_content_dict_chain([])
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 0 must be bytes'):
-            dctx.decompress_content_dict_chain([u'foo'])
+        with self.assertRaisesRegex(ValueError, "chunk 0 must be bytes"):
+            dctx.decompress_content_dict_chain([u"foo"])
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 0 must be bytes'):
+        with self.assertRaisesRegex(ValueError, "chunk 0 must be bytes"):
             dctx.decompress_content_dict_chain([True])
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 0 is too small to contain a zstd frame'):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 0 is too small to contain a zstd frame"
+        ):
             dctx.decompress_content_dict_chain([zstd.FRAME_HEADER])
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 0 is not a valid zstd frame'):
-            dctx.decompress_content_dict_chain([b'foo' * 8])
+        with self.assertRaisesRegex(ValueError, "chunk 0 is not a valid zstd frame"):
+            dctx.decompress_content_dict_chain([b"foo" * 8])
 
-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b'foo' * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 0 missing content size in frame'):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 0 missing content size in frame"
+        ):
             dctx.decompress_content_dict_chain([no_size])
 
         # Corrupt first frame.
-        frame = zstd.ZstdCompressor().compress(b'foo' * 64)
+        frame = zstd.ZstdCompressor().compress(b"foo" * 64)
         frame = frame[0:12] + frame[15:]
-        with self.assertRaisesRegexp(zstd.ZstdError,
-                                     'chunk 0 did not decompress full frame'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "chunk 0 did not decompress full frame"
+        ):
             dctx.decompress_content_dict_chain([frame])
 
     def test_bad_subsequent_input(self):
-        initial = zstd.ZstdCompressor().compress(b'foo' * 64)
+        initial = zstd.ZstdCompressor().compress(b"foo" * 64)
 
         dctx = zstd.ZstdDecompressor()
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 1 must be bytes'):
-            dctx.decompress_content_dict_chain([initial, u'foo'])
+        with self.assertRaisesRegex(ValueError, "chunk 1 must be bytes"):
+            dctx.decompress_content_dict_chain([initial, u"foo"])
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 1 must be bytes'):
+        with self.assertRaisesRegex(ValueError, "chunk 1 must be bytes"):
             dctx.decompress_content_dict_chain([initial, None])
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 1 is too small to contain a zstd frame'):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 1 is too small to contain a zstd frame"
+        ):
             dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER])
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 1 is not a valid zstd frame'):
-            dctx.decompress_content_dict_chain([initial, b'foo' * 8])
+        with self.assertRaisesRegex(ValueError, "chunk 1 is not a valid zstd frame"):
+            dctx.decompress_content_dict_chain([initial, b"foo" * 8])
 
-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b'foo' * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
 
-        with self.assertRaisesRegexp(ValueError, 'chunk 1 missing content size in frame'):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 1 missing content size in frame"
+        ):
             dctx.decompress_content_dict_chain([initial, no_size])
 
         # Corrupt second frame.
-        cctx = zstd.ZstdCompressor(dict_data=zstd.ZstdCompressionDict(b'foo' * 64))
-        frame = cctx.compress(b'bar' * 64)
+        cctx = zstd.ZstdCompressor(dict_data=zstd.ZstdCompressionDict(b"foo" * 64))
+        frame = cctx.compress(b"bar" * 64)
         frame = frame[0:12] + frame[15:]
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'chunk 1 did not decompress full frame'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "chunk 1 did not decompress full frame"
+        ):
             dctx.decompress_content_dict_chain([initial, frame])
 
     def test_simple(self):
         original = [
-            b'foo' * 64,
-            b'foobar' * 64,
-            b'baz' * 64,
-            b'foobaz' * 64,
-            b'foobarbaz' * 64,
+            b"foo" * 64,
+            b"foobar" * 64,
+            b"baz" * 64,
+            b"foobaz" * 64,
+            b"foobarbaz" * 64,
         ]
 
         chunks = []
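
For context on test_simple: decompress_content_dict_chain() takes a list of
frames in which each frame after the first was compressed using the previous
element's uncompressed content as a dictionary, and returns the decompressed
final element. A hedged sketch of building such a chain:

import zstandard as zstd

elements = [b"foo" * 64, b"foobar" * 64, b"baz" * 64]
chain = [zstd.ZstdCompressor().compress(elements[0])]
for prev, cur in zip(elements, elements[1:]):
    d = zstd.ZstdCompressionDict(prev)
    chain.append(zstd.ZstdCompressor(dict_data=d).compress(cur))

dctx = zstd.ZstdDecompressor()
assert dctx.decompress_content_dict_chain(chain) == elements[-1]
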
@@ -1396,12 +1434,12 @@
 
 
 # TODO enable for CFFI
-class TestDecompressor_multi_decompress_to_buffer(unittest.TestCase):
+class TestDecompressor_multi_decompress_to_buffer(TestCase):
     def test_invalid_inputs(self):
         dctx = zstd.ZstdDecompressor()
 
-        if not hasattr(dctx, 'multi_decompress_to_buffer'):
-            self.skipTest('multi_decompress_to_buffer not available')
+        if not hasattr(dctx, "multi_decompress_to_buffer"):
+            self.skipTest("multi_decompress_to_buffer not available")
 
         with self.assertRaises(TypeError):
             dctx.multi_decompress_to_buffer(True)
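
The hasattr guards throughout this class exist because
multi_decompress_to_buffer is only implemented by the C backend. When it is
available, it decompresses a batch of frames (optionally across threads) into
one contiguous allocation; a sketch of the happy path:

import zstandard as zstd

cctx = zstd.ZstdCompressor()
original = [b"foo" * 4, b"bar" * 6]
frames = [cctx.compress(d) for d in original]

dctx = zstd.ZstdDecompressor()
if hasattr(dctx, "multi_decompress_to_buffer"):   # C extension only
    result = dctx.multi_decompress_to_buffer(frames, threads=-1)
    assert len(result) == len(frames)
    assert result[0].tobytes() == original[0]
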
@@ -1409,22 +1447,24 @@
         with self.assertRaises(TypeError):
             dctx.multi_decompress_to_buffer((1, 2))
 
-        with self.assertRaisesRegexp(TypeError, 'item 0 not a bytes like object'):
-            dctx.multi_decompress_to_buffer([u'foo'])
+        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+            dctx.multi_decompress_to_buffer([u"foo"])
 
-        with self.assertRaisesRegexp(ValueError, 'could not determine decompressed size of item 0'):
-            dctx.multi_decompress_to_buffer([b'foobarbaz'])
+        with self.assertRaisesRegex(
+            ValueError, "could not determine decompressed size of item 0"
+        ):
+            dctx.multi_decompress_to_buffer([b"foobarbaz"])
 
     def test_list_input(self):
         cctx = zstd.ZstdCompressor()
 
-        original = [b'foo' * 4, b'bar' * 6]
+        original = [b"foo" * 4, b"bar" * 6]
         frames = [cctx.compress(d) for d in original]
 
         dctx = zstd.ZstdDecompressor()
 
-        if not hasattr(dctx, 'multi_decompress_to_buffer'):
-            self.skipTest('multi_decompress_to_buffer not available')
+        if not hasattr(dctx, "multi_decompress_to_buffer"):
+            self.skipTest("multi_decompress_to_buffer not available")
 
         result = dctx.multi_decompress_to_buffer(frames)
 
@@ -1442,14 +1482,14 @@
     def test_list_input_frame_sizes(self):
         cctx = zstd.ZstdCompressor()
 
-        original = [b'foo' * 4, b'bar' * 6, b'baz' * 8]
+        original = [b"foo" * 4, b"bar" * 6, b"baz" * 8]
         frames = [cctx.compress(d) for d in original]
-        sizes = struct.pack('=' + 'Q' * len(original), *map(len, original))
+        sizes = struct.pack("=" + "Q" * len(original), *map(len, original))
 
         dctx = zstd.ZstdDecompressor()
 
-        if not hasattr(dctx, 'multi_decompress_to_buffer'):
-            self.skipTest('multi_decompress_to_buffer not available')
+        if not hasattr(dctx, "multi_decompress_to_buffer"):
+            self.skipTest("multi_decompress_to_buffer not available")
 
         result = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
 
@@ -1462,16 +1502,18 @@
     def test_buffer_with_segments_input(self):
         cctx = zstd.ZstdCompressor()
 
-        original = [b'foo' * 4, b'bar' * 6]
+        original = [b"foo" * 4, b"bar" * 6]
         frames = [cctx.compress(d) for d in original]
 
         dctx = zstd.ZstdDecompressor()
 
-        if not hasattr(dctx, 'multi_decompress_to_buffer'):
-            self.skipTest('multi_decompress_to_buffer not available')
+        if not hasattr(dctx, "multi_decompress_to_buffer"):
+            self.skipTest("multi_decompress_to_buffer not available")
 
-        segments = struct.pack('=QQQQ', 0, len(frames[0]), len(frames[0]), len(frames[1]))
-        b = zstd.BufferWithSegments(b''.join(frames), segments)
+        segments = struct.pack(
+            "=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1])
+        )
+        b = zstd.BufferWithSegments(b"".join(frames), segments)
 
         result = dctx.multi_decompress_to_buffer(b)
 
@@ -1483,19 +1525,25 @@
 
     def test_buffer_with_segments_sizes(self):
         cctx = zstd.ZstdCompressor(write_content_size=False)
-        original = [b'foo' * 4, b'bar' * 6, b'baz' * 8]
+        original = [b"foo" * 4, b"bar" * 6, b"baz" * 8]
         frames = [cctx.compress(d) for d in original]
-        sizes = struct.pack('=' + 'Q' * len(original), *map(len, original))
+        sizes = struct.pack("=" + "Q" * len(original), *map(len, original))
 
         dctx = zstd.ZstdDecompressor()
 
-        if not hasattr(dctx, 'multi_decompress_to_buffer'):
-            self.skipTest('multi_decompress_to_buffer not available')
+        if not hasattr(dctx, "multi_decompress_to_buffer"):
+            self.skipTest("multi_decompress_to_buffer not available")
 
-        segments = struct.pack('=QQQQQQ', 0, len(frames[0]),
-                               len(frames[0]), len(frames[1]),
-                               len(frames[0]) + len(frames[1]), len(frames[2]))
-        b = zstd.BufferWithSegments(b''.join(frames), segments)
+        segments = struct.pack(
+            "=QQQQQQ",
+            0,
+            len(frames[0]),
+            len(frames[0]),
+            len(frames[1]),
+            len(frames[0]) + len(frames[1]),
+            len(frames[2]),
+        )
+        b = zstd.BufferWithSegments(b"".join(frames), segments)
 
         result = dctx.multi_decompress_to_buffer(b, decompressed_sizes=sizes)
 
@@ -1509,15 +1557,15 @@
         cctx = zstd.ZstdCompressor()
 
         original = [
-            b'foo0' * 2,
-            b'foo1' * 3,
-            b'foo2' * 4,
-            b'foo3' * 5,
-            b'foo4' * 6,
+            b"foo0" * 2,
+            b"foo1" * 3,
+            b"foo2" * 4,
+            b"foo3" * 5,
+            b"foo4" * 6,
         ]
 
-        if not hasattr(cctx, 'multi_compress_to_buffer'):
-            self.skipTest('multi_compress_to_buffer not available')
+        if not hasattr(cctx, "multi_compress_to_buffer"):
+            self.skipTest("multi_compress_to_buffer not available")
 
         frames = cctx.multi_compress_to_buffer(original)
 
@@ -1532,16 +1580,24 @@
             self.assertEqual(data, decompressed[i].tobytes())
 
         # And a manual mode.
-        b = b''.join([frames[0].tobytes(), frames[1].tobytes()])
-        b1 = zstd.BufferWithSegments(b, struct.pack('=QQQQ',
-                                                    0, len(frames[0]),
-                                                    len(frames[0]), len(frames[1])))
+        b = b"".join([frames[0].tobytes(), frames[1].tobytes()])
+        b1 = zstd.BufferWithSegments(
+            b, struct.pack("=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1]))
+        )
 
-        b = b''.join([frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()])
-        b2 = zstd.BufferWithSegments(b, struct.pack('=QQQQQQ',
-                                                    0, len(frames[2]),
-                                                    len(frames[2]), len(frames[3]),
-                                                    len(frames[2]) + len(frames[3]), len(frames[4])))
+        b = b"".join([frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()])
+        b2 = zstd.BufferWithSegments(
+            b,
+            struct.pack(
+                "=QQQQQQ",
+                0,
+                len(frames[2]),
+                len(frames[2]),
+                len(frames[3]),
+                len(frames[2]) + len(frames[3]),
+                len(frames[4]),
+            ),
+        )
 
         c = zstd.BufferWithSegmentsCollection(b1, b2)
 
@@ -1560,8 +1616,8 @@
 
         dctx = zstd.ZstdDecompressor(dict_data=d)
 
-        if not hasattr(dctx, 'multi_decompress_to_buffer'):
-            self.skipTest('multi_decompress_to_buffer not available')
+        if not hasattr(dctx, "multi_decompress_to_buffer"):
+            self.skipTest("multi_decompress_to_buffer not available")
 
         result = dctx.multi_decompress_to_buffer(frames)
 
@@ -1571,41 +1627,44 @@
         cctx = zstd.ZstdCompressor()
 
         frames = []
-        frames.extend(cctx.compress(b'x' * 64) for i in range(256))
-        frames.extend(cctx.compress(b'y' * 64) for i in range(256))
+        frames.extend(cctx.compress(b"x" * 64) for i in range(256))
+        frames.extend(cctx.compress(b"y" * 64) for i in range(256))
 
         dctx = zstd.ZstdDecompressor()
 
-        if not hasattr(dctx, 'multi_decompress_to_buffer'):
-            self.skipTest('multi_decompress_to_buffer not available')
+        if not hasattr(dctx, "multi_decompress_to_buffer"):
+            self.skipTest("multi_decompress_to_buffer not available")
 
         result = dctx.multi_decompress_to_buffer(frames, threads=-1)
 
         self.assertEqual(len(result), len(frames))
         self.assertEqual(result.size(), 2 * 64 * 256)
-        self.assertEqual(result[0].tobytes(), b'x' * 64)
-        self.assertEqual(result[256].tobytes(), b'y' * 64)
+        self.assertEqual(result[0].tobytes(), b"x" * 64)
+        self.assertEqual(result[256].tobytes(), b"y" * 64)
 
     def test_item_failure(self):
         cctx = zstd.ZstdCompressor()
-        frames = [cctx.compress(b'x' * 128), cctx.compress(b'y' * 128)]
+        frames = [cctx.compress(b"x" * 128), cctx.compress(b"y" * 128)]
 
-        frames[1] = frames[1][0:15] + b'extra' + frames[1][15:]
+        frames[1] = frames[1][0:15] + b"extra" + frames[1][15:]
 
         dctx = zstd.ZstdDecompressor()
 
-        if not hasattr(dctx, 'multi_decompress_to_buffer'):
-            self.skipTest('multi_decompress_to_buffer not available')
+        if not hasattr(dctx, "multi_decompress_to_buffer"):
+            self.skipTest("multi_decompress_to_buffer not available")
 
-        with self.assertRaisesRegexp(zstd.ZstdError,
-                                     'error decompressing item 1: ('
-                                     'Corrupted block|'
-                                     'Destination buffer is too small)'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError,
+            "error decompressing item 1: ("
+            "Corrupted block|"
+            "Destination buffer is too small)",
+        ):
             dctx.multi_decompress_to_buffer(frames)
 
-        with self.assertRaisesRegexp(zstd.ZstdError,
-                            'error decompressing item 1: ('
-                            'Corrupted block|'
-                            'Destination buffer is too small)'):
+        with self.assertRaisesRegex(
+            zstd.ZstdError,
+            "error decompressing item 1: ("
+            "Corrupted block|"
+            "Destination buffer is too small)",
+        ):
             dctx.multi_decompress_to_buffer(frames, threads=2)
-
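
Across both test files this patch swaps unittest.TestCase for a shared
TestCase imported from .common, and assertRaisesRegexp for assertRaisesRegex
(the former has been a deprecated alias since Python 3.2). A minimal sketch
of the kind of compatibility base class this implies (the real
.common.TestCase may well carry more than this):

import unittest

class TestCase(unittest.TestCase):
    # Python 2's unittest lacks assertRaisesRegex; alias the old spelling
    # so tests can use the modern name unconditionally.
    if not hasattr(unittest.TestCase, "assertRaisesRegex"):
        assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
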
--- a/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Tue Jan 21 13:14:51 2020 -0500
@@ -6,29 +6,37 @@
     import hypothesis
     import hypothesis.strategies as strategies
 except ImportError:
-    raise unittest.SkipTest('hypothesis not available')
+    raise unittest.SkipTest("hypothesis not available")
 
 import zstandard as zstd
 
-from . common import (
+from .common import (
     make_cffi,
     NonClosingBytesIO,
     random_input_data,
+    TestCase,
 )
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestDecompressor_stream_reader_fuzzing(unittest.TestCase):
+class TestDecompressor_stream_reader_fuzzing(TestCase):
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      streaming=strategies.booleans(),
-                      source_read_size=strategies.integers(1, 1048576),
-                      read_sizes=strategies.data())
-    def test_stream_source_read_variance(self, original, level, streaming,
-                                         source_read_size, read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        streaming=strategies.booleans(),
+        source_read_size=strategies.integers(1, 1048576),
+        read_sizes=strategies.data(),
+    )
+    def test_stream_source_read_variance(
+        self, original, level, streaming, source_read_size, read_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
 
         if streaming:
@@ -53,18 +61,22 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), original)
+        self.assertEqual(b"".join(chunks), original)
 
     # Similar to above except we have a constant read() size.
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      streaming=strategies.booleans(),
-                      source_read_size=strategies.integers(1, 1048576),
-                      read_size=strategies.integers(-1, 131072))
-    def test_stream_source_read_size(self, original, level, streaming,
-                                     source_read_size, read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        streaming=strategies.booleans(),
+        source_read_size=strategies.integers(1, 1048576),
+        read_size=strategies.integers(-1, 131072),
+    )
+    def test_stream_source_read_size(
+        self, original, level, streaming, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = 1
 
@@ -91,17 +103,24 @@
 
             chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), original)
+        self.assertEqual(b"".join(chunks), original)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      streaming=strategies.booleans(),
-                      source_read_size=strategies.integers(1, 1048576),
-                      read_sizes=strategies.data())
-    def test_buffer_source_read_variance(self, original, level, streaming,
-                                         source_read_size, read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        streaming=strategies.booleans(),
+        source_read_size=strategies.integers(1, 1048576),
+        read_sizes=strategies.data(),
+    )
+    def test_buffer_source_read_variance(
+        self, original, level, streaming, source_read_size, read_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
 
         if streaming:
@@ -125,18 +144,22 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), original)
+        self.assertEqual(b"".join(chunks), original)
 
     # Similar to above except we have a constant read() size.
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      streaming=strategies.booleans(),
-                      source_read_size=strategies.integers(1, 1048576),
-                      read_size=strategies.integers(-1, 131072))
-    def test_buffer_source_constant_read_size(self, original, level, streaming,
-                                              source_read_size, read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        streaming=strategies.booleans(),
+        source_read_size=strategies.integers(1, 1048576),
+        read_size=strategies.integers(-1, 131072),
+    )
+    def test_buffer_source_constant_read_size(
+        self, original, level, streaming, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1
 
@@ -162,16 +185,18 @@
 
             chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), original)
+        self.assertEqual(b"".join(chunks), original)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      streaming=strategies.booleans(),
-                      source_read_size=strategies.integers(1, 1048576))
-    def test_stream_source_readall(self, original, level, streaming,
-                                         source_read_size):
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        streaming=strategies.booleans(),
+        source_read_size=strategies.integers(1, 1048576),
+    )
+    def test_stream_source_readall(self, original, level, streaming, source_read_size):
         cctx = zstd.ZstdCompressor(level=level)
 
         if streaming:
@@ -190,14 +215,21 @@
         self.assertEqual(data, original)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      streaming=strategies.booleans(),
-                      source_read_size=strategies.integers(1, 1048576),
-                      read_sizes=strategies.data())
-    def test_stream_source_read1_variance(self, original, level, streaming,
-                                          source_read_size, read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        streaming=strategies.booleans(),
+        source_read_size=strategies.integers(1, 1048576),
+        read_sizes=strategies.data(),
+    )
+    def test_stream_source_read1_variance(
+        self, original, level, streaming, source_read_size, read_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
 
         if streaming:
@@ -222,17 +254,24 @@
 
                 chunks.append(chunk)
 
-        self.assertEqual(b''.join(chunks), original)
+        self.assertEqual(b"".join(chunks), original)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      streaming=strategies.booleans(),
-                      source_read_size=strategies.integers(1, 1048576),
-                      read_sizes=strategies.data())
-    def test_stream_source_readinto1_variance(self, original, level, streaming,
-                                          source_read_size, read_sizes):
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        streaming=strategies.booleans(),
+        source_read_size=strategies.integers(1, 1048576),
+        read_sizes=strategies.data(),
+    )
+    def test_stream_source_readinto1_variance(
+        self, original, level, streaming, source_read_size, read_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
 
         if streaming:
@@ -259,18 +298,24 @@
 
                 chunks.append(bytes(b[0:count]))
 
-        self.assertEqual(b''.join(chunks), original)
+        self.assertEqual(b"".join(chunks), original)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
     @hypothesis.given(
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 1048576),
         seek_amounts=strategies.data(),
-        read_sizes=strategies.data())
-    def test_relative_seeks(self, original, level, source_read_size, seek_amounts,
-                            read_sizes):
+        read_sizes=strategies.data(),
+    )
+    def test_relative_seeks(
+        self, original, level, source_read_size, seek_amounts, read_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
 
@@ -288,18 +333,24 @@
                 if not chunk:
                     break
 
-                self.assertEqual(original[offset:offset + len(chunk)], chunk)
+                self.assertEqual(original[offset : offset + len(chunk)], chunk)
 
     @hypothesis.settings(
-        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
     @hypothesis.given(
         originals=strategies.data(),
         frame_count=strategies.integers(min_value=2, max_value=10),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 1048576),
-        read_sizes=strategies.data())
-    def test_multiple_frames(self, originals, frame_count, level,
-                             source_read_size, read_sizes):
+        read_sizes=strategies.data(),
+    )
+    def test_multiple_frames(
+        self, originals, frame_count, level, source_read_size, read_sizes
+    ):
 
         cctx = zstd.ZstdCompressor(level=level)
         source = io.BytesIO()
@@ -314,8 +365,9 @@
 
         dctx = zstd.ZstdDecompressor()
         buffer.seek(0)
-        reader = dctx.stream_reader(buffer, read_size=source_read_size,
-                                    read_across_frames=True)
+        reader = dctx.stream_reader(
+            buffer, read_size=source_read_size, read_across_frames=True
+        )
 
         chunks = []
 
@@ -328,16 +380,24 @@
 
             chunks.append(chunk)
 
-        self.assertEqual(source.getvalue(), b''.join(chunks))
+        self.assertEqual(source.getvalue(), b"".join(chunks))
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestDecompressor_stream_writer_fuzzing(unittest.TestCase):
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      write_size=strategies.integers(min_value=1, max_value=8192),
-                      input_sizes=strategies.data())
+class TestDecompressor_stream_writer_fuzzing(TestCase):
+    @hypothesis.settings(
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        write_size=strategies.integers(min_value=1, max_value=8192),
+        input_sizes=strategies.data(),
+    )
     def test_write_size_variance(self, original, level, write_size, input_sizes):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
@@ -358,13 +418,21 @@
         self.assertEqual(dest.getvalue(), original)
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestDecompressor_copy_stream_fuzzing(unittest.TestCase):
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      read_size=strategies.integers(min_value=1, max_value=8192),
-                      write_size=strategies.integers(min_value=1, max_value=8192))
+class TestDecompressor_copy_stream_fuzzing(TestCase):
+    @hypothesis.settings(
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        read_size=strategies.integers(min_value=1, max_value=8192),
+        write_size=strategies.integers(min_value=1, max_value=8192),
+    )
     def test_read_write_size_variance(self, original, level, read_size, write_size):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
@@ -378,12 +446,20 @@
         self.assertEqual(dest.getvalue(), original)
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestDecompressor_decompressobj_fuzzing(unittest.TestCase):
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      chunk_sizes=strategies.data())
+class TestDecompressor_decompressobj_fuzzing(TestCase):
+    @hypothesis.settings(
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        chunk_sizes=strategies.data(),
+    )
     def test_random_input_sizes(self, original, level, chunk_sizes):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
@@ -402,13 +478,22 @@
 
             chunks.append(dobj.decompress(chunk))
 
-        self.assertEqual(b''.join(chunks), original)
+        self.assertEqual(b"".join(chunks), original)
 
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      write_size=strategies.integers(min_value=1,
-                                                     max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE),
-                      chunk_sizes=strategies.data())
+    @hypothesis.settings(
+        suppress_health_check=[
+            hypothesis.HealthCheck.large_base_example,
+            hypothesis.HealthCheck.too_slow,
+        ]
+    )
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        write_size=strategies.integers(
+            min_value=1, max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
+        chunk_sizes=strategies.data(),
+    )
     def test_random_output_sizes(self, original, level, write_size, chunk_sizes):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
@@ -427,16 +512,18 @@
 
             chunks.append(dobj.decompress(chunk))
 
-        self.assertEqual(b''.join(chunks), original)
+        self.assertEqual(b"".join(chunks), original)
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
 @make_cffi
-class TestDecompressor_read_to_iter_fuzzing(unittest.TestCase):
-    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
-                      level=strategies.integers(min_value=1, max_value=5),
-                      read_size=strategies.integers(min_value=1, max_value=4096),
-                      write_size=strategies.integers(min_value=1, max_value=4096))
+class TestDecompressor_read_to_iter_fuzzing(TestCase):
+    @hypothesis.given(
+        original=strategies.sampled_from(random_input_data()),
+        level=strategies.integers(min_value=1, max_value=5),
+        read_size=strategies.integers(min_value=1, max_value=4096),
+        write_size=strategies.integers(min_value=1, max_value=4096),
+    )
     def test_read_write_size_variance(self, original, level, read_size, write_size):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)
@@ -444,29 +531,33 @@
         source = io.BytesIO(frame)
 
         dctx = zstd.ZstdDecompressor()
-        chunks = list(dctx.read_to_iter(source, read_size=read_size, write_size=write_size))
+        chunks = list(
+            dctx.read_to_iter(source, read_size=read_size, write_size=write_size)
+        )
 
-        self.assertEqual(b''.join(chunks), original)
+        self.assertEqual(b"".join(chunks), original)
 
 
-@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
-class TestDecompressor_multi_decompress_to_buffer_fuzzing(unittest.TestCase):
-    @hypothesis.given(original=strategies.lists(strategies.sampled_from(random_input_data()),
-                                        min_size=1, max_size=1024),
-                threads=strategies.integers(min_value=1, max_value=8),
-                use_dict=strategies.booleans())
+@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
+class TestDecompressor_multi_decompress_to_buffer_fuzzing(TestCase):
+    @hypothesis.given(
+        original=strategies.lists(
+            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+        ),
+        threads=strategies.integers(min_value=1, max_value=8),
+        use_dict=strategies.booleans(),
+    )
     def test_data_equivalence(self, original, threads, use_dict):
         kwargs = {}
         if use_dict:
-            kwargs['dict_data'] = zstd.ZstdCompressionDict(original[0])
+            kwargs["dict_data"] = zstd.ZstdCompressionDict(original[0])
 
-        cctx = zstd.ZstdCompressor(level=1,
-                                   write_content_size=True,
-                                   write_checksum=True,
-                                   **kwargs)
+        cctx = zstd.ZstdCompressor(
+            level=1, write_content_size=True, write_checksum=True, **kwargs
+        )
 
-        if not hasattr(cctx, 'multi_compress_to_buffer'):
-            self.skipTest('multi_compress_to_buffer not available')
+        if not hasattr(cctx, "multi_compress_to_buffer"):
+            self.skipTest("multi_compress_to_buffer not available")
 
         frames_buffer = cctx.multi_compress_to_buffer(original, threads=-1)
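
The buffer APIs used here are only present in the C extension backend, hence the hasattr() guard above. A hedged sketch of the round trip, assuming both methods are available:

    import zstandard as zstd

    cctx = zstd.ZstdCompressor(level=1, write_content_size=True, write_checksum=True)
    dctx = zstd.ZstdDecompressor()

    if hasattr(cctx, "multi_compress_to_buffer") and hasattr(
        dctx, "multi_decompress_to_buffer"
    ):
        frames = cctx.multi_compress_to_buffer([b"foo" * 64, b"bar" * 64], threads=-1)
        result = dctx.multi_decompress_to_buffer(frames, threads=-1)
        assert result[0].tobytes() == b"foo" * 64
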
 
--- a/contrib/python-zstandard/tests/test_estimate_sizes.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_estimate_sizes.py	Tue Jan 21 13:14:51 2020 -0500
@@ -2,14 +2,14 @@
 
 import zstandard as zstd
 
-from . common import (
+from .common import (
     make_cffi,
+    TestCase,
 )
 
 
 @make_cffi
-class TestSizes(unittest.TestCase):
+class TestSizes(TestCase):
     def test_decompression_size(self):
         size = zstd.estimate_decompression_context_size()
         self.assertGreater(size, 100000)
-
--- a/contrib/python-zstandard/tests/test_module_attributes.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_module_attributes.py	Tue Jan 21 13:14:51 2020 -0500
@@ -4,65 +4,66 @@
 
 import zstandard as zstd
 
-from . common import (
+from .common import (
     make_cffi,
+    TestCase,
 )
 
 
 @make_cffi
-class TestModuleAttributes(unittest.TestCase):
+class TestModuleAttributes(TestCase):
     def test_version(self):
-        self.assertEqual(zstd.ZSTD_VERSION, (1, 4, 3))
+        self.assertEqual(zstd.ZSTD_VERSION, (1, 4, 4))
 
-        self.assertEqual(zstd.__version__, '0.12.0')
+        self.assertEqual(zstd.__version__, "0.13.0")
 
     def test_constants(self):
         self.assertEqual(zstd.MAX_COMPRESSION_LEVEL, 22)
-        self.assertEqual(zstd.FRAME_HEADER, b'\x28\xb5\x2f\xfd')
+        self.assertEqual(zstd.FRAME_HEADER, b"\x28\xb5\x2f\xfd")
 
     def test_hasattr(self):
         attrs = (
-            'CONTENTSIZE_UNKNOWN',
-            'CONTENTSIZE_ERROR',
-            'COMPRESSION_RECOMMENDED_INPUT_SIZE',
-            'COMPRESSION_RECOMMENDED_OUTPUT_SIZE',
-            'DECOMPRESSION_RECOMMENDED_INPUT_SIZE',
-            'DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE',
-            'MAGIC_NUMBER',
-            'FLUSH_BLOCK',
-            'FLUSH_FRAME',
-            'BLOCKSIZELOG_MAX',
-            'BLOCKSIZE_MAX',
-            'WINDOWLOG_MIN',
-            'WINDOWLOG_MAX',
-            'CHAINLOG_MIN',
-            'CHAINLOG_MAX',
-            'HASHLOG_MIN',
-            'HASHLOG_MAX',
-            'HASHLOG3_MAX',
-            'MINMATCH_MIN',
-            'MINMATCH_MAX',
-            'SEARCHLOG_MIN',
-            'SEARCHLOG_MAX',
-            'SEARCHLENGTH_MIN',
-            'SEARCHLENGTH_MAX',
-            'TARGETLENGTH_MIN',
-            'TARGETLENGTH_MAX',
-            'LDM_MINMATCH_MIN',
-            'LDM_MINMATCH_MAX',
-            'LDM_BUCKETSIZELOG_MAX',
-            'STRATEGY_FAST',
-            'STRATEGY_DFAST',
-            'STRATEGY_GREEDY',
-            'STRATEGY_LAZY',
-            'STRATEGY_LAZY2',
-            'STRATEGY_BTLAZY2',
-            'STRATEGY_BTOPT',
-            'STRATEGY_BTULTRA',
-            'STRATEGY_BTULTRA2',
-            'DICT_TYPE_AUTO',
-            'DICT_TYPE_RAWCONTENT',
-            'DICT_TYPE_FULLDICT',
+            "CONTENTSIZE_UNKNOWN",
+            "CONTENTSIZE_ERROR",
+            "COMPRESSION_RECOMMENDED_INPUT_SIZE",
+            "COMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+            "DECOMPRESSION_RECOMMENDED_INPUT_SIZE",
+            "DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+            "MAGIC_NUMBER",
+            "FLUSH_BLOCK",
+            "FLUSH_FRAME",
+            "BLOCKSIZELOG_MAX",
+            "BLOCKSIZE_MAX",
+            "WINDOWLOG_MIN",
+            "WINDOWLOG_MAX",
+            "CHAINLOG_MIN",
+            "CHAINLOG_MAX",
+            "HASHLOG_MIN",
+            "HASHLOG_MAX",
+            "HASHLOG3_MAX",
+            "MINMATCH_MIN",
+            "MINMATCH_MAX",
+            "SEARCHLOG_MIN",
+            "SEARCHLOG_MAX",
+            "SEARCHLENGTH_MIN",
+            "SEARCHLENGTH_MAX",
+            "TARGETLENGTH_MIN",
+            "TARGETLENGTH_MAX",
+            "LDM_MINMATCH_MIN",
+            "LDM_MINMATCH_MAX",
+            "LDM_BUCKETSIZELOG_MAX",
+            "STRATEGY_FAST",
+            "STRATEGY_DFAST",
+            "STRATEGY_GREEDY",
+            "STRATEGY_LAZY",
+            "STRATEGY_LAZY2",
+            "STRATEGY_BTLAZY2",
+            "STRATEGY_BTOPT",
+            "STRATEGY_BTULTRA",
+            "STRATEGY_BTULTRA2",
+            "DICT_TYPE_AUTO",
+            "DICT_TYPE_RAWCONTENT",
+            "DICT_TYPE_FULLDICT",
         )
 
         for a in attrs:
--- a/contrib/python-zstandard/tests/test_train_dictionary.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Tue Jan 21 13:14:51 2020 -0500
@@ -4,10 +4,11 @@
 
 import zstandard as zstd
 
-from . common import (
+from .common import (
     generate_samples,
     make_cffi,
     random_input_data,
+    TestCase,
 )
 
 if sys.version_info[0] >= 3:
@@ -17,24 +18,24 @@
 
 
 @make_cffi
-class TestTrainDictionary(unittest.TestCase):
+class TestTrainDictionary(TestCase):
     def test_no_args(self):
         with self.assertRaises(TypeError):
             zstd.train_dictionary()
 
     def test_bad_args(self):
         with self.assertRaises(TypeError):
-            zstd.train_dictionary(8192, u'foo')
+            zstd.train_dictionary(8192, u"foo")
 
         with self.assertRaises(ValueError):
-            zstd.train_dictionary(8192, [u'foo'])
+            zstd.train_dictionary(8192, [u"foo"])
 
     def test_no_params(self):
         d = zstd.train_dictionary(8192, random_input_data())
         self.assertIsInstance(d.dict_id(), int_type)
 
         # The dictionary ID may be different across platforms.
-        expected = b'\x37\xa4\x30\xec' + struct.pack('<I', d.dict_id())
+        expected = b"\x37\xa4\x30\xec" + struct.pack("<I", d.dict_id())
 
         data = d.as_bytes()
         self.assertEqual(data[0:8], expected)
@@ -44,46 +45,48 @@
         self.assertIsInstance(d.dict_id(), int_type)
 
         data = d.as_bytes()
-        self.assertEqual(data[0:4], b'\x37\xa4\x30\xec')
+        self.assertEqual(data[0:4], b"\x37\xa4\x30\xec")
 
         self.assertEqual(d.k, 64)
         self.assertEqual(d.d, 16)
 
     def test_set_dict_id(self):
-        d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16,
-                                  dict_id=42)
+        d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16, dict_id=42)
         self.assertEqual(d.dict_id(), 42)
 
     def test_optimize(self):
-        d = zstd.train_dictionary(8192, generate_samples(), threads=-1, steps=1,
-                                  d=16)
+        d = zstd.train_dictionary(8192, generate_samples(), threads=-1, steps=1, d=16)
 
         # This varies by platform.
         self.assertIn(d.k, (50, 2000))
         self.assertEqual(d.d, 16)
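
A usage sketch of the trainer tested above; the samples here are hypothetical, and training can raise ZstdError if the input set is too small or too uniform:

    import zstandard as zstd

    samples = [("sample %d " % i).encode("ascii") * 16 for i in range(128)]
    d = zstd.train_dictionary(8192, samples, k=64, d=16, dict_id=42)
    assert d.dict_id() == 42

    # The trained dictionary can seed compression and decompression contexts.
    cctx = zstd.ZstdCompressor(level=3, dict_data=d)
    dctx = zstd.ZstdDecompressor(dict_data=d)
    assert dctx.decompress(cctx.compress(samples[0])) == samples[0]
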
 
+
 @make_cffi
-class TestCompressionDict(unittest.TestCase):
+class TestCompressionDict(TestCase):
     def test_bad_mode(self):
-        with self.assertRaisesRegexp(ValueError, 'invalid dictionary load mode'):
-            zstd.ZstdCompressionDict(b'foo', dict_type=42)
+        with self.assertRaisesRegex(ValueError, "invalid dictionary load mode"):
+            zstd.ZstdCompressionDict(b"foo", dict_type=42)
 
     def test_bad_precompute_compress(self):
         d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16)
 
-        with self.assertRaisesRegexp(ValueError, 'must specify one of level or '):
+        with self.assertRaisesRegex(ValueError, "must specify one of level or "):
             d.precompute_compress()
 
-        with self.assertRaisesRegexp(ValueError, 'must only specify one of level or '):
-            d.precompute_compress(level=3,
-                                  compression_params=zstd.CompressionParameters())
+        with self.assertRaisesRegex(ValueError, "must only specify one of level or "):
+            d.precompute_compress(
+                level=3, compression_params=zstd.CompressionParameters()
+            )
 
     def test_precompute_compress_rawcontent(self):
-        d = zstd.ZstdCompressionDict(b'dictcontent' * 64,
-                                     dict_type=zstd.DICT_TYPE_RAWCONTENT)
+        d = zstd.ZstdCompressionDict(
+            b"dictcontent" * 64, dict_type=zstd.DICT_TYPE_RAWCONTENT
+        )
         d.precompute_compress(level=1)
 
-        d = zstd.ZstdCompressionDict(b'dictcontent' * 64,
-                                     dict_type=zstd.DICT_TYPE_FULLDICT)
-        with self.assertRaisesRegexp(zstd.ZstdError, 'unable to precompute dictionary'):
+        d = zstd.ZstdCompressionDict(
+            b"dictcontent" * 64, dict_type=zstd.DICT_TYPE_FULLDICT
+        )
+        with self.assertRaisesRegex(zstd.ZstdError, "unable to precompute dictionary"):
             d.precompute_compress(level=1)
--- a/contrib/python-zstandard/zstandard/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstandard/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -28,38 +28,48 @@
 # defining a variable and `setup.py` could write the file with whatever
 # policy was specified at build time. Until someone needs it, we go with
 # the hacky but simple environment variable approach.
-_module_policy = os.environ.get('PYTHON_ZSTANDARD_IMPORT_POLICY', 'default')
+_module_policy = os.environ.get("PYTHON_ZSTANDARD_IMPORT_POLICY", "default")
 
-if _module_policy == 'default':
-    if platform.python_implementation() in ('CPython',):
+if _module_policy == "default":
+    if platform.python_implementation() in ("CPython",):
         from zstd import *
-        backend = 'cext'
-    elif platform.python_implementation() in ('PyPy',):
+
+        backend = "cext"
+    elif platform.python_implementation() in ("PyPy",):
         from .cffi import *
-        backend = 'cffi'
+
+        backend = "cffi"
     else:
         try:
             from zstd import *
-            backend = 'cext'
+
+            backend = "cext"
         except ImportError:
             from .cffi import *
-            backend = 'cffi'
-elif _module_policy == 'cffi_fallback':
+
+            backend = "cffi"
+elif _module_policy == "cffi_fallback":
     try:
         from zstd import *
-        backend = 'cext'
+
+        backend = "cext"
     except ImportError:
         from .cffi import *
-        backend = 'cffi'
-elif _module_policy == 'cext':
+
+        backend = "cffi"
+elif _module_policy == "cext":
     from zstd import *
-    backend = 'cext'
-elif _module_policy == 'cffi':
+
+    backend = "cext"
+elif _module_policy == "cffi":
     from .cffi import *
-    backend = 'cffi'
+
+    backend = "cffi"
 else:
-    raise ImportError('unknown module import policy: %s; use default, cffi_fallback, '
-                      'cext, or cffi' % _module_policy)
+    raise ImportError(
+        "unknown module import policy: %s; use default, cffi_fallback, "
+        "cext, or cffi" % _module_policy
+    )
 
 # Keep this in sync with python-zstandard.h.
-__version__ = '0.12.0'
+__version__ = "0.13.0"
--- a/contrib/python-zstandard/zstandard/cffi.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstandard/cffi.py	Tue Jan 21 13:14:51 2020 -0500
@@ -14,68 +14,67 @@
     #'BufferSegments',
     #'BufferWithSegments',
     #'BufferWithSegmentsCollection',
-    'CompressionParameters',
-    'ZstdCompressionDict',
-    'ZstdCompressionParameters',
-    'ZstdCompressor',
-    'ZstdError',
-    'ZstdDecompressor',
-    'FrameParameters',
-    'estimate_decompression_context_size',
-    'frame_content_size',
-    'frame_header_size',
-    'get_frame_parameters',
-    'train_dictionary',
-
+    "CompressionParameters",
+    "ZstdCompressionDict",
+    "ZstdCompressionParameters",
+    "ZstdCompressor",
+    "ZstdError",
+    "ZstdDecompressor",
+    "FrameParameters",
+    "estimate_decompression_context_size",
+    "frame_content_size",
+    "frame_header_size",
+    "get_frame_parameters",
+    "train_dictionary",
     # Constants.
-    'FLUSH_BLOCK',
-    'FLUSH_FRAME',
-    'COMPRESSOBJ_FLUSH_FINISH',
-    'COMPRESSOBJ_FLUSH_BLOCK',
-    'ZSTD_VERSION',
-    'FRAME_HEADER',
-    'CONTENTSIZE_UNKNOWN',
-    'CONTENTSIZE_ERROR',
-    'MAX_COMPRESSION_LEVEL',
-    'COMPRESSION_RECOMMENDED_INPUT_SIZE',
-    'COMPRESSION_RECOMMENDED_OUTPUT_SIZE',
-    'DECOMPRESSION_RECOMMENDED_INPUT_SIZE',
-    'DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE',
-    'MAGIC_NUMBER',
-    'BLOCKSIZELOG_MAX',
-    'BLOCKSIZE_MAX',
-    'WINDOWLOG_MIN',
-    'WINDOWLOG_MAX',
-    'CHAINLOG_MIN',
-    'CHAINLOG_MAX',
-    'HASHLOG_MIN',
-    'HASHLOG_MAX',
-    'HASHLOG3_MAX',
-    'MINMATCH_MIN',
-    'MINMATCH_MAX',
-    'SEARCHLOG_MIN',
-    'SEARCHLOG_MAX',
-    'SEARCHLENGTH_MIN',
-    'SEARCHLENGTH_MAX',
-    'TARGETLENGTH_MIN',
-    'TARGETLENGTH_MAX',
-    'LDM_MINMATCH_MIN',
-    'LDM_MINMATCH_MAX',
-    'LDM_BUCKETSIZELOG_MAX',
-    'STRATEGY_FAST',
-    'STRATEGY_DFAST',
-    'STRATEGY_GREEDY',
-    'STRATEGY_LAZY',
-    'STRATEGY_LAZY2',
-    'STRATEGY_BTLAZY2',
-    'STRATEGY_BTOPT',
-    'STRATEGY_BTULTRA',
-    'STRATEGY_BTULTRA2',
-    'DICT_TYPE_AUTO',
-    'DICT_TYPE_RAWCONTENT',
-    'DICT_TYPE_FULLDICT',
-    'FORMAT_ZSTD1',
-    'FORMAT_ZSTD1_MAGICLESS',
+    "FLUSH_BLOCK",
+    "FLUSH_FRAME",
+    "COMPRESSOBJ_FLUSH_FINISH",
+    "COMPRESSOBJ_FLUSH_BLOCK",
+    "ZSTD_VERSION",
+    "FRAME_HEADER",
+    "CONTENTSIZE_UNKNOWN",
+    "CONTENTSIZE_ERROR",
+    "MAX_COMPRESSION_LEVEL",
+    "COMPRESSION_RECOMMENDED_INPUT_SIZE",
+    "COMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+    "DECOMPRESSION_RECOMMENDED_INPUT_SIZE",
+    "DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE",
+    "MAGIC_NUMBER",
+    "BLOCKSIZELOG_MAX",
+    "BLOCKSIZE_MAX",
+    "WINDOWLOG_MIN",
+    "WINDOWLOG_MAX",
+    "CHAINLOG_MIN",
+    "CHAINLOG_MAX",
+    "HASHLOG_MIN",
+    "HASHLOG_MAX",
+    "HASHLOG3_MAX",
+    "MINMATCH_MIN",
+    "MINMATCH_MAX",
+    "SEARCHLOG_MIN",
+    "SEARCHLOG_MAX",
+    "SEARCHLENGTH_MIN",
+    "SEARCHLENGTH_MAX",
+    "TARGETLENGTH_MIN",
+    "TARGETLENGTH_MAX",
+    "LDM_MINMATCH_MIN",
+    "LDM_MINMATCH_MAX",
+    "LDM_BUCKETSIZELOG_MAX",
+    "STRATEGY_FAST",
+    "STRATEGY_DFAST",
+    "STRATEGY_GREEDY",
+    "STRATEGY_LAZY",
+    "STRATEGY_LAZY2",
+    "STRATEGY_BTLAZY2",
+    "STRATEGY_BTOPT",
+    "STRATEGY_BTULTRA",
+    "STRATEGY_BTULTRA2",
+    "DICT_TYPE_AUTO",
+    "DICT_TYPE_RAWCONTENT",
+    "DICT_TYPE_FULLDICT",
+    "FORMAT_ZSTD1",
+    "FORMAT_ZSTD1_MAGICLESS",
 ]
 
 import io
@@ -105,10 +104,14 @@
 
 MAX_COMPRESSION_LEVEL = lib.ZSTD_maxCLevel()
 MAGIC_NUMBER = lib.ZSTD_MAGICNUMBER
-FRAME_HEADER = b'\x28\xb5\x2f\xfd'
+FRAME_HEADER = b"\x28\xb5\x2f\xfd"
 CONTENTSIZE_UNKNOWN = lib.ZSTD_CONTENTSIZE_UNKNOWN
 CONTENTSIZE_ERROR = lib.ZSTD_CONTENTSIZE_ERROR
-ZSTD_VERSION = (lib.ZSTD_VERSION_MAJOR, lib.ZSTD_VERSION_MINOR, lib.ZSTD_VERSION_RELEASE)
+ZSTD_VERSION = (
+    lib.ZSTD_VERSION_MAJOR,
+    lib.ZSTD_VERSION_MINOR,
+    lib.ZSTD_VERSION_RELEASE,
+)
 
 BLOCKSIZELOG_MAX = lib.ZSTD_BLOCKSIZELOG_MAX
 BLOCKSIZE_MAX = lib.ZSTD_BLOCKSIZE_MAX
@@ -165,9 +168,9 @@
     # Linux.
     try:
         if sys.version_info[0] == 2:
-            return os.sysconf(b'SC_NPROCESSORS_ONLN')
+            return os.sysconf(b"SC_NPROCESSORS_ONLN")
         else:
-            return os.sysconf(u'SC_NPROCESSORS_ONLN')
+            return os.sysconf("SC_NPROCESSORS_ONLN")
     except (AttributeError, ValueError):
         pass
 
@@ -183,7 +186,8 @@
     # Resolves to bytes on Python 2 and 3. We use the string for formatting
     # into error messages, which will be literal unicode. So convert it to
     # unicode.
-    return ffi.string(lib.ZSTD_getErrorName(zresult)).decode('utf-8')
+    return ffi.string(lib.ZSTD_getErrorName(zresult)).decode("utf-8")
+
 
 def _make_cctx_params(params):
     res = lib.ZSTD_createCCtxParams()
@@ -221,19 +225,20 @@
 
     return res
 
+
 class ZstdCompressionParameters(object):
     @staticmethod
     def from_level(level, source_size=0, dict_size=0, **kwargs):
         params = lib.ZSTD_getCParams(level, source_size, dict_size)
 
         args = {
-            'window_log': 'windowLog',
-            'chain_log': 'chainLog',
-            'hash_log': 'hashLog',
-            'search_log': 'searchLog',
-            'min_match': 'minMatch',
-            'target_length': 'targetLength',
-            'compression_strategy': 'strategy',
+            "window_log": "windowLog",
+            "chain_log": "chainLog",
+            "hash_log": "hashLog",
+            "search_log": "searchLog",
+            "min_match": "minMatch",
+            "target_length": "targetLength",
+            "compression_strategy": "strategy",
         }
 
         for arg, attr in args.items():
@@ -242,14 +247,33 @@
 
         return ZstdCompressionParameters(**kwargs)
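
A sketch of from_level(): parameters are derived from a compression level, and explicit keyword arguments win over the derived values (the window_log here is an arbitrary override):

    import zstandard as zstd

    params = zstd.ZstdCompressionParameters.from_level(
        3, source_size=1048576, window_log=20
    )
    cctx = zstd.ZstdCompressor(compression_params=params)
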
 
-    def __init__(self, format=0, compression_level=0, window_log=0, hash_log=0,
-                 chain_log=0, search_log=0, min_match=0, target_length=0,
-                 strategy=-1, compression_strategy=-1,
-                 write_content_size=1, write_checksum=0,
-                 write_dict_id=0, job_size=0, overlap_log=-1,
-                 overlap_size_log=-1, force_max_window=0, enable_ldm=0,
-                 ldm_hash_log=0, ldm_min_match=0, ldm_bucket_size_log=0,
-                 ldm_hash_rate_log=-1, ldm_hash_every_log=-1, threads=0):
+    def __init__(
+        self,
+        format=0,
+        compression_level=0,
+        window_log=0,
+        hash_log=0,
+        chain_log=0,
+        search_log=0,
+        min_match=0,
+        target_length=0,
+        strategy=-1,
+        compression_strategy=-1,
+        write_content_size=1,
+        write_checksum=0,
+        write_dict_id=0,
+        job_size=0,
+        overlap_log=-1,
+        overlap_size_log=-1,
+        force_max_window=0,
+        enable_ldm=0,
+        ldm_hash_log=0,
+        ldm_min_match=0,
+        ldm_bucket_size_log=0,
+        ldm_hash_rate_log=-1,
+        ldm_hash_every_log=-1,
+        threads=0,
+    ):
 
         params = lib.ZSTD_createCCtxParams()
         if params == ffi.NULL:
@@ -267,7 +291,9 @@
         _set_compression_parameter(params, lib.ZSTD_c_nbWorkers, threads)
 
         _set_compression_parameter(params, lib.ZSTD_c_format, format)
-        _set_compression_parameter(params, lib.ZSTD_c_compressionLevel, compression_level)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_compressionLevel, compression_level
+        )
         _set_compression_parameter(params, lib.ZSTD_c_windowLog, window_log)
         _set_compression_parameter(params, lib.ZSTD_c_hashLog, hash_log)
         _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log)
@@ -276,7 +302,7 @@
         _set_compression_parameter(params, lib.ZSTD_c_targetLength, target_length)
 
         if strategy != -1 and compression_strategy != -1:
-            raise ValueError('cannot specify both compression_strategy and strategy')
+            raise ValueError("cannot specify both compression_strategy and strategy")
 
         if compression_strategy != -1:
             strategy = compression_strategy
@@ -284,13 +310,15 @@
             strategy = 0
 
         _set_compression_parameter(params, lib.ZSTD_c_strategy, strategy)
-        _set_compression_parameter(params, lib.ZSTD_c_contentSizeFlag, write_content_size)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_contentSizeFlag, write_content_size
+        )
         _set_compression_parameter(params, lib.ZSTD_c_checksumFlag, write_checksum)
         _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id)
         _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size)
 
         if overlap_log != -1 and overlap_size_log != -1:
-            raise ValueError('cannot specify both overlap_log and overlap_size_log')
+            raise ValueError("cannot specify both overlap_log and overlap_size_log")
 
         if overlap_size_log != -1:
             overlap_log = overlap_size_log
@@ -299,13 +327,19 @@
 
         _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log)
         _set_compression_parameter(params, lib.ZSTD_c_forceMaxWindow, force_max_window)
-        _set_compression_parameter(params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm
+        )
         _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log)
         _set_compression_parameter(params, lib.ZSTD_c_ldmMinMatch, ldm_min_match)
-        _set_compression_parameter(params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log
+        )
 
         if ldm_hash_rate_log != -1 and ldm_hash_every_log != -1:
-            raise ValueError('cannot specify both ldm_hash_rate_log and ldm_hash_every_log')
+            raise ValueError(
+                "cannot specify both ldm_hash_rate_log and ldm_hash_every_log"
+            )
 
         if ldm_hash_every_log != -1:
             ldm_hash_rate_log = ldm_hash_every_log
@@ -380,7 +414,9 @@
 
     @property
     def enable_ldm(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_enableLongDistanceMatching)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_enableLongDistanceMatching
+        )
 
     @property
     def ldm_hash_log(self):
@@ -409,8 +445,10 @@
     def estimated_compression_context_size(self):
         return lib.ZSTD_estimateCCtxSize_usingCCtxParams(self._params)
 
+
 CompressionParameters = ZstdCompressionParameters
 
+
 def estimate_decompression_context_size():
     return lib.ZSTD_estimateDCtxSize()
 
@@ -418,24 +456,25 @@
 def _set_compression_parameter(params, param, value):
     zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
     if lib.ZSTD_isError(zresult):
-        raise ZstdError('unable to set compression context parameter: %s' %
-                        _zstd_error(zresult))
+        raise ZstdError(
+            "unable to set compression context parameter: %s" % _zstd_error(zresult)
+        )
 
 
 def _get_compression_parameter(params, param):
-    result = ffi.new('int *')
+    result = ffi.new("int *")
 
     zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
     if lib.ZSTD_isError(zresult):
-        raise ZstdError('unable to get compression context parameter: %s' %
-                        _zstd_error(zresult))
+        raise ZstdError(
+            "unable to get compression context parameter: %s" % _zstd_error(zresult)
+        )
 
     return result[0]
 
 
 class ZstdCompressionWriter(object):
-    def __init__(self, compressor, writer, source_size, write_size,
-                 write_return_read):
+    def __init__(self, compressor, writer, source_size, write_size, write_return_read):
         self._compressor = compressor
         self._writer = writer
         self._write_size = write_size
@@ -444,24 +483,22 @@
         self._closed = False
         self._bytes_compressed = 0
 
-        self._dst_buffer = ffi.new('char[]', write_size)
-        self._out_buffer = ffi.new('ZSTD_outBuffer *')
+        self._dst_buffer = ffi.new("char[]", write_size)
+        self._out_buffer = ffi.new("ZSTD_outBuffer *")
         self._out_buffer.dst = self._dst_buffer
         self._out_buffer.size = len(self._dst_buffer)
         self._out_buffer.pos = 0
 
-        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx,
-                                                  source_size)
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error setting source size: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
 
     def __enter__(self):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if self._entered:
-            raise ZstdError('cannot __enter__ multiple times')
+            raise ZstdError("cannot __enter__ multiple times")
 
         self._entered = True
         return self
@@ -480,11 +517,11 @@
         return lib.ZSTD_sizeof_CCtx(self._compressor._cctx)
 
     def fileno(self):
-        f = getattr(self._writer, 'fileno', None)
+        f = getattr(self._writer, "fileno", None)
         if f:
             return f()
         else:
-            raise OSError('fileno not available on underlying writer')
+            raise OSError("fileno not available on underlying writer")
 
     def close(self):
         if self._closed:
@@ -496,7 +533,7 @@
             self._closed = True
 
         # Call close() on underlying stream as well.
-        f = getattr(self._writer, 'close', None)
+        f = getattr(self._writer, "close", None)
         if f:
             f()
 
@@ -529,7 +566,7 @@
         return True
 
     def writelines(self, lines):
-        raise NotImplementedError('writelines() is not yet implemented')
+        raise NotImplementedError("writelines() is not yet implemented")
 
     def read(self, size=-1):
         raise io.UnsupportedOperation()
@@ -542,13 +579,13 @@
 
     def write(self, data):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         total_write = 0
 
         data_buffer = ffi.from_buffer(data)
 
-        in_buffer = ffi.new('ZSTD_inBuffer *')
+        in_buffer = ffi.new("ZSTD_inBuffer *")
         in_buffer.src = data_buffer
         in_buffer.size = len(data_buffer)
         in_buffer.pos = 0
@@ -557,12 +594,11 @@
         out_buffer.pos = 0
 
         while in_buffer.pos < in_buffer.size:
-            zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                               out_buffer, in_buffer,
-                                               lib.ZSTD_e_continue)
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+            )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('zstd compress error: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
 
             if out_buffer.pos:
                 self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
@@ -581,28 +617,27 @@
         elif flush_mode == FLUSH_FRAME:
             flush = lib.ZSTD_e_end
         else:
-            raise ValueError('unknown flush_mode: %r' % flush_mode)
+            raise ValueError("unknown flush_mode: %r" % flush_mode)
 
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         total_write = 0
 
         out_buffer = self._out_buffer
         out_buffer.pos = 0
 
-        in_buffer = ffi.new('ZSTD_inBuffer *')
+        in_buffer = ffi.new("ZSTD_inBuffer *")
         in_buffer.src = ffi.NULL
         in_buffer.size = 0
         in_buffer.pos = 0
 
         while True:
-            zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                               out_buffer, in_buffer,
-                                               flush)
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, out_buffer, in_buffer, flush
+            )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('zstd compress error: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
 
             if out_buffer.pos:
                 self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
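
A usage sketch of the writer implemented by this class; closing the context manager ends the zstd frame:

    import io
    import zstandard as zstd

    cctx = zstd.ZstdCompressor(level=3)
    dest = io.BytesIO()

    with cctx.stream_writer(dest) as compressor:
        compressor.write(b"chunk 0" * 1024)
        compressor.write(b"chunk 1" * 1024)

    frame = dest.getvalue()
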
@@ -622,10 +657,10 @@
 class ZstdCompressionObj(object):
     def compress(self, data):
         if self._finished:
-            raise ZstdError('cannot call compress() after compressor finished')
+            raise ZstdError("cannot call compress() after compressor finished")
 
         data_buffer = ffi.from_buffer(data)
-        source = ffi.new('ZSTD_inBuffer *')
+        source = ffi.new("ZSTD_inBuffer *")
         source.src = data_buffer
         source.size = len(data_buffer)
         source.pos = 0
@@ -633,26 +668,24 @@
         chunks = []
 
         while source.pos < len(data):
-            zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                               self._out,
-                                               source,
-                                               lib.ZSTD_e_continue)
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
+            )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('zstd compress error: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
 
             if self._out.pos:
                 chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
                 self._out.pos = 0
 
-        return b''.join(chunks)
+        return b"".join(chunks)
 
     def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
         if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK):
-            raise ValueError('flush mode not recognized')
+            raise ValueError("flush mode not recognized")
 
         if self._finished:
-            raise ZstdError('compressor object already finished')
+            raise ZstdError("compressor object already finished")
 
         if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
             z_flush_mode = lib.ZSTD_e_flush
@@ -660,11 +693,11 @@
             z_flush_mode = lib.ZSTD_e_end
             self._finished = True
         else:
-            raise ZstdError('unhandled flush mode')
+            raise ZstdError("unhandled flush mode")
 
         assert self._out.pos == 0
 
-        in_buffer = ffi.new('ZSTD_inBuffer *')
+        in_buffer = ffi.new("ZSTD_inBuffer *")
         in_buffer.src = ffi.NULL
         in_buffer.size = 0
         in_buffer.pos = 0
@@ -672,13 +705,13 @@
         chunks = []
 
         while True:
-            zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                               self._out,
-                                               in_buffer,
-                                               z_flush_mode)
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, self._out, in_buffer, z_flush_mode
+            )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('error ending compression stream: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError(
+                    "error ending compression stream: %s" % _zstd_error(zresult)
+                )
 
             if self._out.pos:
                 chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
@@ -687,19 +720,19 @@
             if not zresult:
                 break
 
-        return b''.join(chunks)
+        return b"".join(chunks)
 
 
 class ZstdCompressionChunker(object):
     def __init__(self, compressor, chunk_size):
         self._compressor = compressor
-        self._out = ffi.new('ZSTD_outBuffer *')
-        self._dst_buffer = ffi.new('char[]', chunk_size)
+        self._out = ffi.new("ZSTD_outBuffer *")
+        self._dst_buffer = ffi.new("char[]", chunk_size)
         self._out.dst = self._dst_buffer
         self._out.size = chunk_size
         self._out.pos = 0
 
-        self._in = ffi.new('ZSTD_inBuffer *')
+        self._in = ffi.new("ZSTD_inBuffer *")
         self._in.src = ffi.NULL
         self._in.size = 0
         self._in.pos = 0
@@ -707,11 +740,13 @@
 
     def compress(self, data):
         if self._finished:
-            raise ZstdError('cannot call compress() after compression finished')
+            raise ZstdError("cannot call compress() after compression finished")
 
         if self._in.src != ffi.NULL:
-            raise ZstdError('cannot perform operation before consuming output '
-                            'from previous operation')
+            raise ZstdError(
+                "cannot perform operation before consuming output "
+                "from previous operation"
+            )
 
         data_buffer = ffi.from_buffer(data)
 
@@ -723,10 +758,9 @@
         self._in.pos = 0
 
         while self._in.pos < self._in.size:
-            zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                               self._out,
-                                               self._in,
-                                               lib.ZSTD_e_continue)
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, self._out, self._in, lib.ZSTD_e_continue
+            )
 
             if self._in.pos == self._in.size:
                 self._in.src = ffi.NULL
@@ -734,8 +768,7 @@
                 self._in.pos = 0
 
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('zstd compress error: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
 
             if self._out.pos == self._out.size:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -743,18 +776,19 @@
 
     def flush(self):
         if self._finished:
-            raise ZstdError('cannot call flush() after compression finished')
+            raise ZstdError("cannot call flush() after compression finished")
 
         if self._in.src != ffi.NULL:
-            raise ZstdError('cannot call flush() before consuming output from '
-                            'previous operation')
+            raise ZstdError(
+                "cannot call flush() before consuming output from " "previous operation"
+            )
 
         while True:
-            zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                               self._out, self._in,
-                                               lib.ZSTD_e_flush)
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush
+            )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('zstd compress error: %s' % _zstd_error(zresult))
+                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
 
             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -765,18 +799,20 @@
 
     def finish(self):
         if self._finished:
-            raise ZstdError('cannot call finish() after compression finished')
+            raise ZstdError("cannot call finish() after compression finished")
 
         if self._in.src != ffi.NULL:
-            raise ZstdError('cannot call finish() before consuming output from '
-                            'previous operation')
+            raise ZstdError(
+                "cannot call finish() before consuming output from "
+                "previous operation"
+            )
 
         while True:
-            zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                               self._out, self._in,
-                                               lib.ZSTD_e_end)
+            zresult = lib.ZSTD_compressStream2(
+                self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end
+            )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('zstd compress error: %s' % _zstd_error(zresult))
+                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
 
             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
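
A sketch of the chunker API implemented by this class; compress(), flush(), and finish() are generators yielding output chunks of at most chunk_size bytes:

    import zstandard as zstd

    cctx = zstd.ZstdCompressor(level=3)
    chunker = cctx.chunker(chunk_size=16384)

    out = []
    for data in (b"foo" * 8192, b"bar" * 8192):
        out.extend(chunker.compress(data))
    out.extend(chunker.finish())
    frame = b"".join(out)
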
@@ -798,13 +834,13 @@
         self._finished_input = False
         self._finished_output = False
 
-        self._in_buffer = ffi.new('ZSTD_inBuffer *')
+        self._in_buffer = ffi.new("ZSTD_inBuffer *")
         # Holds a ref so backing bytes in self._in_buffer stay alive.
         self._source_buffer = None
 
     def __enter__(self):
         if self._entered:
-            raise ValueError('cannot __enter__ multiple times')
+            raise ValueError("cannot __enter__ multiple times")
 
         self._entered = True
         return self
@@ -833,10 +869,10 @@
         raise io.UnsupportedOperation()
 
     def write(self, data):
-        raise OSError('stream is not writable')
+        raise OSError("stream is not writable")
 
     def writelines(self, ignored):
-        raise OSError('stream is not writable')
+        raise OSError("stream is not writable")
 
     def isatty(self):
         return False
@@ -865,7 +901,7 @@
 
             chunks.append(chunk)
 
-        return b''.join(chunks)
+        return b"".join(chunks)
 
     def __iter__(self):
         raise io.UnsupportedOperation()
@@ -879,7 +915,7 @@
         if self._finished_input:
             return
 
-        if hasattr(self._source, 'read'):
+        if hasattr(self._source, "read"):
             data = self._source.read(self._read_size)
 
             if not data:
@@ -902,9 +938,9 @@
 
         old_pos = out_buffer.pos
 
-        zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                           out_buffer, self._in_buffer,
-                                           lib.ZSTD_e_continue)
+        zresult = lib.ZSTD_compressStream2(
+            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_continue
+        )
 
         self._bytes_compressed += out_buffer.pos - old_pos
 
@@ -914,31 +950,30 @@
             self._in_buffer.size = 0
             self._source_buffer = None
 
-            if not hasattr(self._source, 'read'):
+            if not hasattr(self._source, "read"):
                 self._finished_input = True
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('zstd compress error: %s',
-                            _zstd_error(zresult))
+            raise ZstdError("zstd compress error: %s", _zstd_error(zresult))
 
         return out_buffer.pos and out_buffer.pos == out_buffer.size
 
     def read(self, size=-1):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if size < -1:
-            raise ValueError('cannot read negative amounts less than -1')
+            raise ValueError("cannot read negative amounts less than -1")
 
         if size == -1:
             return self.readall()
 
         if self._finished_output or size == 0:
-            return b''
+            return b""
 
         # Need a dedicated ref to dest buffer otherwise it gets collected.
-        dst_buffer = ffi.new('char[]', size)
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        dst_buffer = ffi.new("char[]", size)
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = dst_buffer
         out_buffer.size = size
         out_buffer.pos = 0
@@ -955,15 +990,14 @@
         # EOF
         old_pos = out_buffer.pos
 
-        zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                           out_buffer, self._in_buffer,
-                                           lib.ZSTD_e_end)
+        zresult = lib.ZSTD_compressStream2(
+            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
+        )
 
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error ending compression stream: %s',
-                            _zstd_error(zresult))
+            raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
 
         if zresult == 0:
             self._finished_output = True
@@ -972,20 +1006,20 @@
 
     def read1(self, size=-1):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if size < -1:
-            raise ValueError('cannot read negative amounts less than -1')
+            raise ValueError("cannot read negative amounts less than -1")
 
         if self._finished_output or size == 0:
-            return b''
+            return b""
 
         # -1 returns arbitrary number of bytes.
         if size == -1:
             size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
 
-        dst_buffer = ffi.new('char[]', size)
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        dst_buffer = ffi.new("char[]", size)
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = dst_buffer
         out_buffer.size = size
         out_buffer.pos = 0
@@ -1020,15 +1054,16 @@
         # EOF.
         old_pos = out_buffer.pos
 
-        zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                           out_buffer, self._in_buffer,
-                                           lib.ZSTD_e_end)
+        zresult = lib.ZSTD_compressStream2(
+            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
+        )
 
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error ending compression stream: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError(
+                "error ending compression stream: %s" % _zstd_error(zresult)
+            )
 
         if zresult == 0:
             self._finished_output = True
@@ -1037,15 +1072,15 @@
 
     def readinto(self, b):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if self._finished_output:
             return 0
 
         # TODO use writable=True once we require CFFI >= 1.12.
         dest_buffer = ffi.from_buffer(b)
-        ffi.memmove(b, b'', 0)
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        ffi.memmove(b, b"", 0)
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = dest_buffer
         out_buffer.size = len(dest_buffer)
         out_buffer.pos = 0
@@ -1060,15 +1095,14 @@
 
         # EOF.
         old_pos = out_buffer.pos
-        zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                           out_buffer, self._in_buffer,
-                                           lib.ZSTD_e_end)
+        zresult = lib.ZSTD_compressStream2(
+            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
+        )
 
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error ending compression stream: %s',
-                            _zstd_error(zresult))
+            raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
 
         if zresult == 0:
             self._finished_output = True
@@ -1077,16 +1111,16 @@
 
     def readinto1(self, b):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if self._finished_output:
             return 0
 
         # TODO use writable=True once we require CFFI >= 1.12.
         dest_buffer = ffi.from_buffer(b)
-        ffi.memmove(b, b'', 0)
-
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        ffi.memmove(b, b"", 0)
+
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = dest_buffer
         out_buffer.size = len(dest_buffer)
         out_buffer.pos = 0
@@ -1107,15 +1141,16 @@
         # EOF.
         old_pos = out_buffer.pos
 
-        zresult = lib.ZSTD_compressStream2(self._compressor._cctx,
-                                           out_buffer, self._in_buffer,
-                                           lib.ZSTD_e_end)
+        zresult = lib.ZSTD_compressStream2(
+            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
+        )
 
         self._bytes_compressed += out_buffer.pos - old_pos
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error ending compression stream: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError(
+                "error ending compression stream: %s" % _zstd_error(zresult)
+            )
 
         if zresult == 0:
             self._finished_output = True
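
A usage sketch of this pull-style reader: read() returns compressed bytes, drawing from the source as needed:

    import io
    import zstandard as zstd

    cctx = zstd.ZstdCompressor(level=3)
    source = io.BytesIO(b"data to compress" * 1024)

    with cctx.stream_reader(source) as reader:
        compressed = reader.read()
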
@@ -1124,29 +1159,35 @@
 
 
 class ZstdCompressor(object):
-    def __init__(self, level=3, dict_data=None, compression_params=None,
-                 write_checksum=None, write_content_size=None,
-                 write_dict_id=None, threads=0):
+    def __init__(
+        self,
+        level=3,
+        dict_data=None,
+        compression_params=None,
+        write_checksum=None,
+        write_content_size=None,
+        write_dict_id=None,
+        threads=0,
+    ):
         if level > lib.ZSTD_maxCLevel():
-            raise ValueError('level must be less than %d' % lib.ZSTD_maxCLevel())
+            raise ValueError("level must be less than %d" % lib.ZSTD_maxCLevel())
 
         if threads < 0:
             threads = _cpu_count()
 
         if compression_params and write_checksum is not None:
-            raise ValueError('cannot define compression_params and '
-                             'write_checksum')
+            raise ValueError("cannot define compression_params and " "write_checksum")
 
         if compression_params and write_content_size is not None:
-            raise ValueError('cannot define compression_params and '
-                             'write_content_size')
+            raise ValueError(
+                "cannot define compression_params and " "write_content_size"
+            )
 
         if compression_params and write_dict_id is not None:
-            raise ValueError('cannot define compression_params and '
-                             'write_dict_id')
+            raise ValueError("cannot define compression_params and " "write_dict_id")
 
         if compression_params and threads:
-            raise ValueError('cannot define compression_params and threads')
+            raise ValueError("cannot define compression_params and threads")
 
         if compression_params:
             self._params = _make_cctx_params(compression_params)
@@ -1160,27 +1201,24 @@
 
             self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams)
 
-            _set_compression_parameter(self._params,
-                                       lib.ZSTD_c_compressionLevel,
-                                       level)
+            _set_compression_parameter(self._params, lib.ZSTD_c_compressionLevel, level)
 
             _set_compression_parameter(
                 self._params,
                 lib.ZSTD_c_contentSizeFlag,
-                write_content_size if write_content_size is not None else 1)
-
-            _set_compression_parameter(self._params,
-                                       lib.ZSTD_c_checksumFlag,
-                                       1 if write_checksum else 0)
-
-            _set_compression_parameter(self._params,
-                                       lib.ZSTD_c_dictIDFlag,
-                                       1 if write_dict_id else 0)
+                write_content_size if write_content_size is not None else 1,
+            )
+
+            _set_compression_parameter(
+                self._params, lib.ZSTD_c_checksumFlag, 1 if write_checksum else 0
+            )
+
+            _set_compression_parameter(
+                self._params, lib.ZSTD_c_dictIDFlag, 1 if write_dict_id else 0
+            )
 
             if threads:
-                _set_compression_parameter(self._params,
-                                           lib.ZSTD_c_nbWorkers,
-                                           threads)
+                _set_compression_parameter(self._params, lib.ZSTD_c_nbWorkers, threads)
 
         cctx = lib.ZSTD_createCCtx()
         if cctx == ffi.NULL:
@@ -1194,15 +1232,16 @@
         try:
             self._setup_cctx()
         finally:
-            self._cctx = ffi.gc(cctx, lib.ZSTD_freeCCtx,
-                                size=lib.ZSTD_sizeof_CCtx(cctx))
+            self._cctx = ffi.gc(
+                cctx, lib.ZSTD_freeCCtx, size=lib.ZSTD_sizeof_CCtx(cctx)
+            )
 
     def _setup_cctx(self):
-        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(self._cctx,
-                                                             self._params)
+        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(self._cctx, self._params)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('could not set compression parameters: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError(
+                "could not set compression parameters: %s" % _zstd_error(zresult)
+            )
 
         dict_data = self._dict_data
 
@@ -1211,12 +1250,17 @@
                 zresult = lib.ZSTD_CCtx_refCDict(self._cctx, dict_data._cdict)
             else:
                 zresult = lib.ZSTD_CCtx_loadDictionary_advanced(
-                    self._cctx, dict_data.as_bytes(), len(dict_data),
-                    lib.ZSTD_dlm_byRef, dict_data._dict_type)
+                    self._cctx,
+                    dict_data.as_bytes(),
+                    len(dict_data),
+                    lib.ZSTD_dlm_byRef,
+                    dict_data._dict_type,
+                )
 
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('could not load compression dictionary: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError(
+                    "could not load compression dictionary: %s" % _zstd_error(zresult)
+                )
 
     def memory_size(self):
         return lib.ZSTD_sizeof_CCtx(self._cctx)
@@ -1227,15 +1271,14 @@
         data_buffer = ffi.from_buffer(data)
 
         dest_size = lib.ZSTD_compressBound(len(data_buffer))
-        out = new_nonzero('char[]', dest_size)
+        out = new_nonzero("char[]", dest_size)
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer))
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error setting source size: %s' %
-                            _zstd_error(zresult))
-
-        out_buffer = ffi.new('ZSTD_outBuffer *')
-        in_buffer = ffi.new('ZSTD_inBuffer *')
+            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+
+        out_buffer = ffi.new("ZSTD_outBuffer *")
+        in_buffer = ffi.new("ZSTD_inBuffer *")
 
         out_buffer.dst = out
         out_buffer.size = dest_size
@@ -1245,16 +1288,14 @@
         in_buffer.size = len(data_buffer)
         in_buffer.pos = 0
 
-        zresult = lib.ZSTD_compressStream2(self._cctx,
-                                           out_buffer,
-                                           in_buffer,
-                                           lib.ZSTD_e_end)
+        zresult = lib.ZSTD_compressStream2(
+            self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end
+        )
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('cannot compress: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError("cannot compress: %s" % _zstd_error(zresult))
         elif zresult:
-            raise ZstdError('unexpected partial frame flush')
+            raise ZstdError("unexpected partial frame flush")
 
         return ffi.buffer(out, out_buffer.pos)[:]
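
For context while reading these reformatted bindings: the one-shot compress() path above pairs with ZstdDecompressor.decompress() in the public API. A minimal round-trip sketch, assuming the bindings are importable as `zstandard`; everything below is illustrative and not part of the patch:

    import zstandard as zstd

    cctx = zstd.ZstdCompressor(level=3)
    frame = cctx.compress(b"data to compress")  # one-shot; frame records content size
    dctx = zstd.ZstdDecompressor()
    assert dctx.decompress(frame) == b"data to compress"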
 
@@ -1266,12 +1307,11 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error setting source size: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
 
         cobj = ZstdCompressionObj()
-        cobj._out = ffi.new('ZSTD_outBuffer *')
-        cobj._dst_buffer = ffi.new('char[]', COMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        cobj._out = ffi.new("ZSTD_outBuffer *")
+        cobj._dst_buffer = ffi.new("char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE)
         cobj._out.dst = cobj._dst_buffer
         cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
         cobj._out.pos = 0
@@ -1288,19 +1328,23 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error setting source size: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
 
         return ZstdCompressionChunker(self, chunk_size=chunk_size)
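
The chunker() wrapper above yields size-bounded compressed chunks. A hedged usage sketch (names outside the diff are illustrative):

    import zstandard as zstd

    cctx = zstd.ZstdCompressor()
    chunker = cctx.chunker(chunk_size=32768)
    chunks = list(chunker.compress(b"input data"))  # may be empty until enough input
    chunks.extend(chunker.finish())                 # emits the final chunk(s)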
 
-    def copy_stream(self, ifh, ofh, size=-1,
-                    read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
-                    write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
-
-        if not hasattr(ifh, 'read'):
-            raise ValueError('first argument must have a read() method')
-        if not hasattr(ofh, 'write'):
-            raise ValueError('second argument must have a write() method')
+    def copy_stream(
+        self,
+        ifh,
+        ofh,
+        size=-1,
+        read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+        write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+    ):
+
+        if not hasattr(ifh, "read"):
+            raise ValueError("first argument must have a read() method")
+        if not hasattr(ofh, "write"):
+            raise ValueError("second argument must have a write() method")
 
         lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
 
@@ -1309,13 +1353,12 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error setting source size: %s' %
-                            _zstd_error(zresult))
-
-        in_buffer = ffi.new('ZSTD_inBuffer *')
-        out_buffer = ffi.new('ZSTD_outBuffer *')
-
-        dst_buffer = ffi.new('char[]', write_size)
+            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        out_buffer = ffi.new("ZSTD_outBuffer *")
+
+        dst_buffer = ffi.new("char[]", write_size)
         out_buffer.dst = dst_buffer
         out_buffer.size = write_size
         out_buffer.pos = 0
@@ -1334,13 +1377,11 @@
             in_buffer.pos = 0
 
             while in_buffer.pos < in_buffer.size:
-                zresult = lib.ZSTD_compressStream2(self._cctx,
-                                                   out_buffer,
-                                                   in_buffer,
-                                                   lib.ZSTD_e_continue)
+                zresult = lib.ZSTD_compressStream2(
+                    self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+                )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError('zstd compress error: %s' %
-                                    _zstd_error(zresult))
+                    raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
 
                 if out_buffer.pos:
                     ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
@@ -1349,13 +1390,13 @@
 
         # We've finished reading. Flush the compressor.
         while True:
-            zresult = lib.ZSTD_compressStream2(self._cctx,
-                                               out_buffer,
-                                               in_buffer,
-                                               lib.ZSTD_e_end)
+            zresult = lib.ZSTD_compressStream2(
+                self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end
+            )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('error ending compression stream: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError(
+                    "error ending compression stream: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
                 ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
@@ -1367,8 +1408,9 @@
 
         return total_read, total_write
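
copy_stream(), whose body is reflowed above, pumps a whole read()/write() pair through the compressor and returns the byte counts. A sketch of a typical call; in-memory streams are used purely for illustration:

    import io
    import zstandard as zstd

    src = io.BytesIO(b"payload " * 1024)
    dst = io.BytesIO()
    read_count, write_count = zstd.ZstdCompressor().copy_stream(src, dst)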
 
-    def stream_reader(self, source, size=-1,
-                      read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE):
+    def stream_reader(
+        self, source, size=-1, read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE
+    ):
         lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
 
         try:
@@ -1381,40 +1423,48 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error setting source size: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
 
         return ZstdCompressionReader(self, source, read_size)
 
-    def stream_writer(self, writer, size=-1,
-                 write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
-                 write_return_read=False):
-
-        if not hasattr(writer, 'write'):
-            raise ValueError('must pass an object with a write() method')
+    def stream_writer(
+        self,
+        writer,
+        size=-1,
+        write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+        write_return_read=False,
+    ):
+
+        if not hasattr(writer, "write"):
+            raise ValueError("must pass an object with a write() method")
 
         lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
 
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN
 
-        return ZstdCompressionWriter(self, writer, size, write_size,
-                                     write_return_read)
+        return ZstdCompressionWriter(self, writer, size, write_size, write_return_read)
 
     write_to = stream_writer
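
stream_writer() (aliased to the legacy write_to above) wraps any write()-capable object; exiting the context manager finalizes the frame. A sketch, with the output path purely illustrative:

    import zstandard as zstd

    with open("out.zst", "wb") as fh:
        with zstd.ZstdCompressor().stream_writer(fh) as compressor:
            compressor.write(b"hello, stream")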
 
-    def read_to_iter(self, reader, size=-1,
-                     read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
-                     write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
-        if hasattr(reader, 'read'):
+    def read_to_iter(
+        self,
+        reader,
+        size=-1,
+        read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+        write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+    ):
+        if hasattr(reader, "read"):
             have_read = True
-        elif hasattr(reader, '__getitem__'):
+        elif hasattr(reader, "__getitem__"):
             have_read = False
             buffer_offset = 0
             size = len(reader)
         else:
-            raise ValueError('must pass an object with a read() method or '
-                             'conforms to buffer protocol')
+            raise ValueError(
+                "must pass an object with a read() method or "
+                "that conforms to the buffer protocol"

+            )
 
         lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only)
 
@@ -1423,17 +1473,16 @@
 
         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('error setting source size: %s' %
-                            _zstd_error(zresult))
-
-        in_buffer = ffi.new('ZSTD_inBuffer *')
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        out_buffer = ffi.new("ZSTD_outBuffer *")
 
         in_buffer.src = ffi.NULL
         in_buffer.size = 0
         in_buffer.pos = 0
 
-        dst_buffer = ffi.new('char[]', write_size)
+        dst_buffer = ffi.new("char[]", write_size)
         out_buffer.dst = dst_buffer
         out_buffer.size = write_size
         out_buffer.pos = 0
@@ -1449,7 +1498,7 @@
             else:
                 remaining = len(reader) - buffer_offset
                 slice_size = min(remaining, read_size)
-                read_result = reader[buffer_offset:buffer_offset + slice_size]
+                read_result = reader[buffer_offset : buffer_offset + slice_size]
                 buffer_offset += slice_size
 
             # No new input data. Break out of the read loop.
@@ -1464,11 +1513,11 @@
             in_buffer.pos = 0
 
             while in_buffer.pos < in_buffer.size:
-                zresult = lib.ZSTD_compressStream2(self._cctx, out_buffer, in_buffer,
-                                                   lib.ZSTD_e_continue)
+                zresult = lib.ZSTD_compressStream2(
+                    self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+                )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError('zstd compress error: %s' %
-                                    _zstd_error(zresult))
+                    raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
 
                 if out_buffer.pos:
                     data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -1484,13 +1533,13 @@
         # remains.
         while True:
             assert out_buffer.pos == 0
-            zresult = lib.ZSTD_compressStream2(self._cctx,
-                                               out_buffer,
-                                               in_buffer,
-                                               lib.ZSTD_e_end)
+            zresult = lib.ZSTD_compressStream2(
+                self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end
+            )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('error ending compression stream: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError(
+                    "error ending compression stream: %s" % _zstd_error(zresult)
+                )
 
             if out_buffer.pos:
                 data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -1522,7 +1571,7 @@
     size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer))
 
     if size == lib.ZSTD_CONTENTSIZE_ERROR:
-        raise ZstdError('error when determining content size')
+        raise ZstdError("error when determining content size")
     elif size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
         return -1
     else:
@@ -1534,24 +1583,23 @@
 
     zresult = lib.ZSTD_frameHeaderSize(data_buffer, len(data_buffer))
     if lib.ZSTD_isError(zresult):
-        raise ZstdError('could not determine frame header size: %s' %
-                        _zstd_error(zresult))
+        raise ZstdError(
+            "could not determine frame header size: %s" % _zstd_error(zresult)
+        )
 
     return zresult
 
 
 def get_frame_parameters(data):
-    params = ffi.new('ZSTD_frameHeader *')
+    params = ffi.new("ZSTD_frameHeader *")
 
     data_buffer = ffi.from_buffer(data)
     zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer))
     if lib.ZSTD_isError(zresult):
-        raise ZstdError('cannot get frame parameters: %s' %
-                        _zstd_error(zresult))
+        raise ZstdError("cannot get frame parameters: %s" % _zstd_error(zresult))
 
     if zresult:
-        raise ZstdError('not enough data for frame parameters; need %d bytes' %
-                        zresult)
+        raise ZstdError("not enough data for frame parameters; need %d bytes" % zresult)
 
     return FrameParameters(params[0])
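
get_frame_parameters(), reflowed above, parses a frame header into a FrameParameters object. A sketch; the attribute names follow the package's documented API:

    import zstandard as zstd

    frame = zstd.ZstdCompressor().compress(b"data")
    params = zstd.get_frame_parameters(frame)
    print(params.content_size, params.window_size, params.dict_id, params.has_checksum)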
 
@@ -1563,10 +1611,10 @@
         self.k = k
         self.d = d
 
-        if dict_type not in (DICT_TYPE_AUTO, DICT_TYPE_RAWCONTENT,
-                             DICT_TYPE_FULLDICT):
-            raise ValueError('invalid dictionary load mode: %d; must use '
-                             'DICT_TYPE_* constants')
+        if dict_type not in (DICT_TYPE_AUTO, DICT_TYPE_RAWCONTENT, DICT_TYPE_FULLDICT):
+            raise ValueError(
+                "invalid dictionary load mode: %d; must use "
+                "DICT_TYPE_* constants" % dict_type
+            )
 
         self._dict_type = dict_type
         self._cdict = None
@@ -1582,16 +1630,15 @@
 
     def precompute_compress(self, level=0, compression_params=None):
         if level and compression_params:
-            raise ValueError('must only specify one of level or '
-                             'compression_params')
+            raise ValueError("must only specify one of level or compression_params")
 
         if not level and not compression_params:
-            raise ValueError('must specify one of level or compression_params')
+            raise ValueError("must specify one of level or compression_params")
 
         if level:
             cparams = lib.ZSTD_getCParams(level, 0, len(self._data))
         else:
-            cparams = ffi.new('ZSTD_compressionParameters')
+            cparams = ffi.new("ZSTD_compressionParameters *")[0]
             cparams.chainLog = compression_params.chain_log
             cparams.hashLog = compression_params.hash_log
             cparams.minMatch = compression_params.min_match
@@ -1600,59 +1647,75 @@
             cparams.targetLength = compression_params.target_length
             cparams.windowLog = compression_params.window_log
 
-        cdict = lib.ZSTD_createCDict_advanced(self._data, len(self._data),
-                                              lib.ZSTD_dlm_byRef,
-                                              self._dict_type,
-                                              cparams,
-                                              lib.ZSTD_defaultCMem)
+        cdict = lib.ZSTD_createCDict_advanced(
+            self._data,
+            len(self._data),
+            lib.ZSTD_dlm_byRef,
+            self._dict_type,
+            cparams,
+            lib.ZSTD_defaultCMem,
+        )
         if cdict == ffi.NULL:
-            raise ZstdError('unable to precompute dictionary')
-
-        self._cdict = ffi.gc(cdict, lib.ZSTD_freeCDict,
-                             size=lib.ZSTD_sizeof_CDict(cdict))
+            raise ZstdError("unable to precompute dictionary")
+
+        self._cdict = ffi.gc(
+            cdict, lib.ZSTD_freeCDict, size=lib.ZSTD_sizeof_CDict(cdict)
+        )
 
     @property
     def _ddict(self):
-        ddict = lib.ZSTD_createDDict_advanced(self._data, len(self._data),
-                                              lib.ZSTD_dlm_byRef,
-                                              self._dict_type,
-                                              lib.ZSTD_defaultCMem)
+        ddict = lib.ZSTD_createDDict_advanced(
+            self._data,
+            len(self._data),
+            lib.ZSTD_dlm_byRef,
+            self._dict_type,
+            lib.ZSTD_defaultCMem,
+        )
 
         if ddict == ffi.NULL:
-            raise ZstdError('could not create decompression dict')
-
-        ddict = ffi.gc(ddict, lib.ZSTD_freeDDict,
-                       size=lib.ZSTD_sizeof_DDict(ddict))
-        self.__dict__['_ddict'] = ddict
+            raise ZstdError("could not create decompression dict")
+
+        ddict = ffi.gc(ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict))
+        self.__dict__["_ddict"] = ddict
 
         return ddict
 
-def train_dictionary(dict_size, samples, k=0, d=0, notifications=0, dict_id=0,
-                     level=0, steps=0, threads=0):
+
+def train_dictionary(
+    dict_size,
+    samples,
+    k=0,
+    d=0,
+    notifications=0,
+    dict_id=0,
+    level=0,
+    steps=0,
+    threads=0,
+):
     if not isinstance(samples, list):
-        raise TypeError('samples must be a list')
+        raise TypeError("samples must be a list")
 
     if threads < 0:
         threads = _cpu_count()
 
     total_size = sum(map(len, samples))
 
-    samples_buffer = new_nonzero('char[]', total_size)
-    sample_sizes = new_nonzero('size_t[]', len(samples))
+    samples_buffer = new_nonzero("char[]", total_size)
+    sample_sizes = new_nonzero("size_t[]", len(samples))
 
     offset = 0
     for i, sample in enumerate(samples):
         if not isinstance(sample, bytes_type):
-            raise ValueError('samples must be bytes')
+            raise ValueError("samples must be bytes")
 
         l = len(sample)
         ffi.memmove(samples_buffer + offset, sample, l)
         offset += l
         sample_sizes[i] = l
 
-    dict_data = new_nonzero('char[]', dict_size)
-
-    dparams = ffi.new('ZDICT_cover_params_t *')[0]
+    dict_data = new_nonzero("char[]", dict_size)
+
+    dparams = ffi.new("ZDICT_cover_params_t *")[0]
     dparams.k = k
     dparams.d = d
     dparams.steps = steps
@@ -1661,34 +1724,51 @@
     dparams.zParams.dictID = dict_id
     dparams.zParams.compressionLevel = level
 
-    if (not dparams.k and not dparams.d and not dparams.steps
-        and not dparams.nbThreads and not dparams.zParams.notificationLevel
+    if (
+        not dparams.k
+        and not dparams.d
+        and not dparams.steps
+        and not dparams.nbThreads
+        and not dparams.zParams.notificationLevel
         and not dparams.zParams.dictID
-        and not dparams.zParams.compressionLevel):
+        and not dparams.zParams.compressionLevel
+    ):
         zresult = lib.ZDICT_trainFromBuffer(
-            ffi.addressof(dict_data), dict_size,
+            ffi.addressof(dict_data),
+            dict_size,
             ffi.addressof(samples_buffer),
-            ffi.addressof(sample_sizes, 0), len(samples))
+            ffi.addressof(sample_sizes, 0),
+            len(samples),
+        )
     elif dparams.steps or dparams.nbThreads:
         zresult = lib.ZDICT_optimizeTrainFromBuffer_cover(
-            ffi.addressof(dict_data), dict_size,
+            ffi.addressof(dict_data),
+            dict_size,
             ffi.addressof(samples_buffer),
-            ffi.addressof(sample_sizes, 0), len(samples),
-            ffi.addressof(dparams))
+            ffi.addressof(sample_sizes, 0),
+            len(samples),
+            ffi.addressof(dparams),
+        )
     else:
         zresult = lib.ZDICT_trainFromBuffer_cover(
-            ffi.addressof(dict_data), dict_size,
+            ffi.addressof(dict_data),
+            dict_size,
             ffi.addressof(samples_buffer),
-            ffi.addressof(sample_sizes, 0), len(samples),
-            dparams)
+            ffi.addressof(sample_sizes, 0),
+            len(samples),
+            dparams,
+        )
 
     if lib.ZDICT_isError(zresult):
-        msg = ffi.string(lib.ZDICT_getErrorName(zresult)).decode('utf-8')
-        raise ZstdError('cannot train dict: %s' % msg)
-
-    return ZstdCompressionDict(ffi.buffer(dict_data, zresult)[:],
-                               dict_type=DICT_TYPE_FULLDICT,
-                               k=dparams.k, d=dparams.d)
+        msg = ffi.string(lib.ZDICT_getErrorName(zresult)).decode("utf-8")
+        raise ZstdError("cannot train dict: %s" % msg)
+
+    return ZstdCompressionDict(
+        ffi.buffer(dict_data, zresult)[:],
+        dict_type=DICT_TYPE_FULLDICT,
+        k=dparams.k,
+        d=dparams.d,
+    )
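
train_dictionary(), whose argument list is exploded above, drives the ZDICT trainers. A sketch of the call shape only; the toy samples are hypothetical, and ZDICT may reject sample sets this small, so real training wants many representative inputs:

    import zstandard as zstd

    samples = [b"sample record %d" % i for i in range(100)]
    dict_data = zstd.train_dictionary(16384, samples)
    cctx = zstd.ZstdCompressor(dict_data=dict_data)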
 
 
 class ZstdDecompressionObj(object):
@@ -1699,21 +1779,21 @@
 
     def decompress(self, data):
         if self._finished:
-            raise ZstdError('cannot use a decompressobj multiple times')
-
-        in_buffer = ffi.new('ZSTD_inBuffer *')
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+            raise ZstdError("cannot use a decompressobj multiple times")
+
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        out_buffer = ffi.new("ZSTD_outBuffer *")
 
         data_buffer = ffi.from_buffer(data)
 
         if len(data_buffer) == 0:
-            return b''
+            return b""
 
         in_buffer.src = data_buffer
         in_buffer.size = len(data_buffer)
         in_buffer.pos = 0
 
-        dst_buffer = ffi.new('char[]', self._write_size)
+        dst_buffer = ffi.new("char[]", self._write_size)
         out_buffer.dst = dst_buffer
         out_buffer.size = len(dst_buffer)
         out_buffer.pos = 0
@@ -1721,11 +1801,11 @@
         chunks = []
 
         while True:
-            zresult = lib.ZSTD_decompressStream(self._decompressor._dctx,
-                                                out_buffer, in_buffer)
+            zresult = lib.ZSTD_decompressStream(
+                self._decompressor._dctx, out_buffer, in_buffer
+            )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('zstd decompressor error: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError("zstd decompressor error: %s" % _zstd_error(zresult))
 
             if zresult == 0:
                 self._finished = True
@@ -1734,13 +1814,14 @@
             if out_buffer.pos:
                 chunks.append(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
 
-            if (zresult == 0 or
-                    (in_buffer.pos == in_buffer.size and out_buffer.pos == 0)):
+            if zresult == 0 or (
+                in_buffer.pos == in_buffer.size and out_buffer.pos == 0
+            ):
                 break
 
             out_buffer.pos = 0
 
-        return b''.join(chunks)
+        return b"".join(chunks)
 
     def flush(self, length=0):
         pass
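
ZstdDecompressionObj mirrors the zlib decompressobj pattern: feed compressed bytes in, collect output, one frame per object (flush() above is a no-op kept for API compatibility). A sketch, not part of the patch:

    import zstandard as zstd

    frame = zstd.ZstdCompressor().compress(b"data to compress")
    dobj = zstd.ZstdDecompressor().decompressobj()
    data = dobj.decompress(frame)  # a second frame needs a fresh decompressobj
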
@@ -1757,13 +1838,13 @@
         self._bytes_decompressed = 0
         self._finished_input = False
         self._finished_output = False
-        self._in_buffer = ffi.new('ZSTD_inBuffer *')
+        self._in_buffer = ffi.new("ZSTD_inBuffer *")
         # Holds a ref to self._in_buffer.src.
         self._source_buffer = None
 
     def __enter__(self):
         if self._entered:
-            raise ValueError('cannot __enter__ multiple times')
+            raise ValueError("cannot __enter__ multiple times")
 
         self._entered = True
         return self
@@ -1824,7 +1905,7 @@
 
             chunks.append(chunk)
 
-        return b''.join(chunks)
+        return b"".join(chunks)
 
     def __iter__(self):
         raise io.UnsupportedOperation()
@@ -1844,7 +1925,7 @@
             return
 
         # Else populate the input buffer from our source.
-        if hasattr(self._source, 'read'):
+        if hasattr(self._source, "read"):
             data = self._source.read(self._read_size)
 
             if not data:
@@ -1866,8 +1947,9 @@
 
         Returns True if data in output buffer should be emitted.
         """
-        zresult = lib.ZSTD_decompressStream(self._decompressor._dctx,
-                                            out_buffer, self._in_buffer)
+        zresult = lib.ZSTD_decompressStream(
+            self._decompressor._dctx, out_buffer, self._in_buffer
+        )
 
         if self._in_buffer.pos == self._in_buffer.size:
             self._in_buffer.src = ffi.NULL
@@ -1875,38 +1957,39 @@
             self._in_buffer.size = 0
             self._source_buffer = None
 
-            if not hasattr(self._source, 'read'):
+            if not hasattr(self._source, "read"):
                 self._finished_input = True
 
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('zstd decompress error: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
 
         # Emit data if there is data AND either:
         # a) output buffer is full (read amount is satisfied)
         # b) we're at end of a frame and not in frame spanning mode
-        return (out_buffer.pos and
-                (out_buffer.pos == out_buffer.size or
-                 zresult == 0 and not self._read_across_frames))
+        return out_buffer.pos and (
+            out_buffer.pos == out_buffer.size
+            or zresult == 0
+            and not self._read_across_frames
+        )
 
     def read(self, size=-1):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if size < -1:
-            raise ValueError('cannot read negative amounts less than -1')
+            raise ValueError("cannot read negative amounts less than -1")
 
         if size == -1:
             # This is recursive. But it gets the job done.
             return self.readall()
 
         if self._finished_output or size == 0:
-            return b''
+            return b""
 
         # We /could/ call into readinto() here. But that introduces more
         # overhead.
-        dst_buffer = ffi.new('char[]', size)
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        dst_buffer = ffi.new("char[]", size)
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = dst_buffer
         out_buffer.size = size
         out_buffer.pos = 0
@@ -1927,15 +2010,15 @@
 
     def readinto(self, b):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if self._finished_output:
             return 0
 
         # TODO use writable=True once we require CFFI >= 1.12.
         dest_buffer = ffi.from_buffer(b)
-        ffi.memmove(b, b'', 0)
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        ffi.memmove(b, b"", 0)
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = dest_buffer
         out_buffer.size = len(dest_buffer)
         out_buffer.pos = 0
@@ -1956,20 +2039,20 @@
 
     def read1(self, size=-1):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if size < -1:
-            raise ValueError('cannot read negative amounts less than -1')
+            raise ValueError("cannot read negative amounts less than -1")
 
         if self._finished_output or size == 0:
-            return b''
+            return b""
 
         # -1 returns arbitrary number of bytes.
         if size == -1:
             size = DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
 
-        dst_buffer = ffi.new('char[]', size)
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        dst_buffer = ffi.new("char[]", size)
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = dst_buffer
         out_buffer.size = size
         out_buffer.pos = 0
@@ -1990,16 +2073,16 @@
 
     def readinto1(self, b):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if self._finished_output:
             return 0
 
         # TODO use writable=True once we require CFFI >= 1.12.
         dest_buffer = ffi.from_buffer(b)
-        ffi.memmove(b, b'', 0)
-
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        ffi.memmove(b, b"", 0)
+
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = dest_buffer
         out_buffer.size = len(dest_buffer)
         out_buffer.pos = 0
@@ -2016,33 +2099,31 @@
 
     def seek(self, pos, whence=os.SEEK_SET):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         read_amount = 0
 
         if whence == os.SEEK_SET:
             if pos < 0:
-                raise ValueError('cannot seek to negative position with SEEK_SET')
+                raise ValueError("cannot seek to negative position with SEEK_SET")
 
             if pos < self._bytes_decompressed:
-                raise ValueError('cannot seek zstd decompression stream '
-                                 'backwards')
+                raise ValueError("cannot seek zstd decompression stream backwards")
 
             read_amount = pos - self._bytes_decompressed
 
         elif whence == os.SEEK_CUR:
             if pos < 0:
-                raise ValueError('cannot seek zstd decompression stream '
-                                 'backwards')
+                raise ValueError("cannot seek zstd decompression stream backwards")
 
             read_amount = pos
         elif whence == os.SEEK_END:
-            raise ValueError('zstd decompression streams cannot be seeked '
-                             'with SEEK_END')
+            raise ValueError(
+                "zstd decompression streams cannot be seeked with SEEK_END"
+            )
 
         while read_amount:
-            result = self.read(min(read_amount,
-                                   DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE))
+            result = self.read(min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE))
 
             if not result:
                 break
@@ -2051,6 +2132,7 @@
 
         return self._bytes_decompressed
 
+
 class ZstdDecompressionWriter(object):
     def __init__(self, decompressor, writer, write_size, write_return_read):
         decompressor._ensure_dctx()
@@ -2064,10 +2146,10 @@
 
     def __enter__(self):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         if self._entered:
-            raise ZstdError('cannot __enter__ multiple times')
+            raise ZstdError("cannot __enter__ multiple times")
 
         self._entered = True
 
@@ -2089,7 +2171,7 @@
         finally:
             self._closed = True
 
-        f = getattr(self._writer, 'close', None)
+        f = getattr(self._writer, "close", None)
         if f:
             f()
 
@@ -2098,17 +2180,17 @@
         return self._closed
 
     def fileno(self):
-        f = getattr(self._writer, 'fileno', None)
+        f = getattr(self._writer, "fileno", None)
         if f:
             return f()
         else:
-            raise OSError('fileno not available on underlying writer')
+            raise OSError("fileno not available on underlying writer")
 
     def flush(self):
         if self._closed:
-            raise ValueError('stream is closed')
-
-        f = getattr(self._writer, 'flush', None)
+            raise ValueError("stream is closed")
+
+        f = getattr(self._writer, "flush", None)
         if f:
             return f()
 
@@ -2153,19 +2235,19 @@
 
     def write(self, data):
         if self._closed:
-            raise ValueError('stream is closed')
+            raise ValueError("stream is closed")
 
         total_write = 0
 
-        in_buffer = ffi.new('ZSTD_inBuffer *')
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        out_buffer = ffi.new("ZSTD_outBuffer *")
 
         data_buffer = ffi.from_buffer(data)
         in_buffer.src = data_buffer
         in_buffer.size = len(data_buffer)
         in_buffer.pos = 0
 
-        dst_buffer = ffi.new('char[]', self._write_size)
+        dst_buffer = ffi.new("char[]", self._write_size)
         out_buffer.dst = dst_buffer
         out_buffer.size = len(dst_buffer)
         out_buffer.pos = 0
@@ -2175,8 +2257,7 @@
         while in_buffer.pos < in_buffer.size:
             zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer)
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('zstd decompress error: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
 
             if out_buffer.pos:
                 self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
@@ -2206,8 +2287,9 @@
         try:
             self._ensure_dctx()
         finally:
-            self._dctx = ffi.gc(dctx, lib.ZSTD_freeDCtx,
-                                size=lib.ZSTD_sizeof_DCtx(dctx))
+            self._dctx = ffi.gc(
+                dctx, lib.ZSTD_freeDCtx, size=lib.ZSTD_sizeof_DCtx(dctx)
+            )
 
     def memory_size(self):
         return lib.ZSTD_sizeof_DCtx(self._dctx)
@@ -2220,85 +2302,96 @@
         output_size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer))
 
         if output_size == lib.ZSTD_CONTENTSIZE_ERROR:
-            raise ZstdError('error determining content size from frame header')
+            raise ZstdError("error determining content size from frame header")
         elif output_size == 0:
-            return b''
+            return b""
         elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
             if not max_output_size:
-                raise ZstdError('could not determine content size in frame header')
-
-            result_buffer = ffi.new('char[]', max_output_size)
+                raise ZstdError("could not determine content size in frame header")
+
+            result_buffer = ffi.new("char[]", max_output_size)
             result_size = max_output_size
             output_size = 0
         else:
-            result_buffer = ffi.new('char[]', output_size)
+            result_buffer = ffi.new("char[]", output_size)
             result_size = output_size
 
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = result_buffer
         out_buffer.size = result_size
         out_buffer.pos = 0
 
-        in_buffer = ffi.new('ZSTD_inBuffer *')
+        in_buffer = ffi.new("ZSTD_inBuffer *")
         in_buffer.src = data_buffer
         in_buffer.size = len(data_buffer)
         in_buffer.pos = 0
 
         zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('decompression error: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError("decompression error: %s" % _zstd_error(zresult))
         elif zresult:
-            raise ZstdError('decompression error: did not decompress full frame')
+            raise ZstdError("decompression error: did not decompress full frame")
         elif output_size and out_buffer.pos != output_size:
-            raise ZstdError('decompression error: decompressed %d bytes; expected %d' %
-                            (zresult, output_size))
+            raise ZstdError(
+                "decompression error: decompressed %d bytes; expected %d"
+                % (out_buffer.pos, output_size)
+            )
 
         return ffi.buffer(result_buffer, out_buffer.pos)[:]
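
The one-shot decompress() above needs the content size from the frame header, or an explicit cap when that size is absent. A sketch of both call forms:

    import zstandard as zstd

    frame = zstd.ZstdCompressor().compress(b"data")
    dctx = zstd.ZstdDecompressor()
    data = dctx.decompress(frame)                           # content size in header
    data = dctx.decompress(frame, max_output_size=2 ** 20)  # cap when size is absent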
 
-    def stream_reader(self, source, read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
-                      read_across_frames=False):
+    def stream_reader(
+        self,
+        source,
+        read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+        read_across_frames=False,
+    ):
         self._ensure_dctx()
         return ZstdDecompressionReader(self, source, read_size, read_across_frames)
 
     def decompressobj(self, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
         if write_size < 1:
-            raise ValueError('write_size must be positive')
+            raise ValueError("write_size must be positive")
 
         self._ensure_dctx()
         return ZstdDecompressionObj(self, write_size=write_size)
 
-    def read_to_iter(self, reader, read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
-                     write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
-                     skip_bytes=0):
+    def read_to_iter(
+        self,
+        reader,
+        read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+        write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+        skip_bytes=0,
+    ):
         if skip_bytes >= read_size:
-            raise ValueError('skip_bytes must be smaller than read_size')
-
-        if hasattr(reader, 'read'):
+            raise ValueError("skip_bytes must be smaller than read_size")
+
+        if hasattr(reader, "read"):
             have_read = True
-        elif hasattr(reader, '__getitem__'):
+        elif hasattr(reader, "__getitem__"):
             have_read = False
             buffer_offset = 0
             size = len(reader)
         else:
-            raise ValueError('must pass an object with a read() method or '
-                             'conforms to buffer protocol')
+            raise ValueError(
+                "must pass an object with a read() method or "
+                "that conforms to the buffer protocol"
+            )
 
         if skip_bytes:
             if have_read:
                 reader.read(skip_bytes)
             else:
                 if skip_bytes > size:
-                    raise ValueError('skip_bytes larger than first input chunk')
+                    raise ValueError("skip_bytes larger than first input chunk")
 
                 buffer_offset = skip_bytes
 
         self._ensure_dctx()
 
-        in_buffer = ffi.new('ZSTD_inBuffer *')
-        out_buffer = ffi.new('ZSTD_outBuffer *')
-
-        dst_buffer = ffi.new('char[]', write_size)
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        out_buffer = ffi.new("ZSTD_outBuffer *")
+
+        dst_buffer = ffi.new("char[]", write_size)
         out_buffer.dst = dst_buffer
         out_buffer.size = len(dst_buffer)
         out_buffer.pos = 0
@@ -2311,7 +2404,7 @@
             else:
                 remaining = size - buffer_offset
                 slice_size = min(remaining, read_size)
-                read_result = reader[buffer_offset:buffer_offset + slice_size]
+                read_result = reader[buffer_offset : buffer_offset + slice_size]
                 buffer_offset += slice_size
 
             # No new input. Break out of read loop.
@@ -2330,8 +2423,7 @@
 
                 zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError('zstd decompress error: %s' %
-                                    _zstd_error(zresult))
+                    raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
 
                 if out_buffer.pos:
                     data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -2348,30 +2440,37 @@
 
     read_from = read_to_iter
 
-    def stream_writer(self, writer, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
-                      write_return_read=False):
-        if not hasattr(writer, 'write'):
-            raise ValueError('must pass an object with a write() method')
-
-        return ZstdDecompressionWriter(self, writer, write_size,
-                                       write_return_read)
+    def stream_writer(
+        self,
+        writer,
+        write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+        write_return_read=False,
+    ):
+        if not hasattr(writer, "write"):
+            raise ValueError("must pass an object with a write() method")
+
+        return ZstdDecompressionWriter(self, writer, write_size, write_return_read)
 
     write_to = stream_writer
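
The decompression-side stream_writer() (write_to alias above) pushes decompressed bytes into any write()-capable sink as they arrive. A sketch; note that close() would also close the sink, per the close() hunk earlier, so the bytes are read before closing:

    import io
    import zstandard as zstd

    frame = zstd.ZstdCompressor().compress(b"data to compress")
    sink = io.BytesIO()
    decompressor = zstd.ZstdDecompressor().stream_writer(sink)
    decompressor.write(frame)  # decompresses eagerly into sink
    assert sink.getvalue() == b"data to compress"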
 
-    def copy_stream(self, ifh, ofh,
-                    read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
-                    write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
-        if not hasattr(ifh, 'read'):
-            raise ValueError('first argument must have a read() method')
-        if not hasattr(ofh, 'write'):
-            raise ValueError('second argument must have a write() method')
+    def copy_stream(
+        self,
+        ifh,
+        ofh,
+        read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+        write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+    ):
+        if not hasattr(ifh, "read"):
+            raise ValueError("first argument must have a read() method")
+        if not hasattr(ofh, "write"):
+            raise ValueError("second argument must have a write() method")
 
         self._ensure_dctx()
 
-        in_buffer = ffi.new('ZSTD_inBuffer *')
-        out_buffer = ffi.new('ZSTD_outBuffer *')
-
-        dst_buffer = ffi.new('char[]', write_size)
+        in_buffer = ffi.new("ZSTD_inBuffer *")
+        out_buffer = ffi.new("ZSTD_outBuffer *")
+
+        dst_buffer = ffi.new("char[]", write_size)
         out_buffer.dst = dst_buffer
         out_buffer.size = write_size
         out_buffer.pos = 0
@@ -2394,8 +2493,9 @@
             while in_buffer.pos < in_buffer.size:
                 zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError('zstd decompressor error: %s' %
-                                    _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd decompressor error: %s" % _zstd_error(zresult)
+                    )
 
                 if out_buffer.pos:
                     ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
@@ -2408,48 +2508,47 @@
 
     def decompress_content_dict_chain(self, frames):
         if not isinstance(frames, list):
-            raise TypeError('argument must be a list')
+            raise TypeError("argument must be a list")
 
         if not frames:
-            raise ValueError('empty input chain')
+            raise ValueError("empty input chain")
 
         # First chunk should not be using a dictionary. We handle it specially.
         chunk = frames[0]
         if not isinstance(chunk, bytes_type):
-            raise ValueError('chunk 0 must be bytes')
+            raise ValueError("chunk 0 must be bytes")
 
         # All chunks should be zstd frames and should have content size set.
         chunk_buffer = ffi.from_buffer(chunk)
-        params = ffi.new('ZSTD_frameHeader *')
+        params = ffi.new("ZSTD_frameHeader *")
         zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
         if lib.ZSTD_isError(zresult):
-            raise ValueError('chunk 0 is not a valid zstd frame')
+            raise ValueError("chunk 0 is not a valid zstd frame")
         elif zresult:
-            raise ValueError('chunk 0 is too small to contain a zstd frame')
+            raise ValueError("chunk 0 is too small to contain a zstd frame")
 
         if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
-            raise ValueError('chunk 0 missing content size in frame')
+            raise ValueError("chunk 0 missing content size in frame")
 
         self._ensure_dctx(load_dict=False)
 
-        last_buffer = ffi.new('char[]', params.frameContentSize)
-
-        out_buffer = ffi.new('ZSTD_outBuffer *')
+        last_buffer = ffi.new("char[]", params.frameContentSize)
+
+        out_buffer = ffi.new("ZSTD_outBuffer *")
         out_buffer.dst = last_buffer
         out_buffer.size = len(last_buffer)
         out_buffer.pos = 0
 
-        in_buffer = ffi.new('ZSTD_inBuffer *')
+        in_buffer = ffi.new("ZSTD_inBuffer *")
         in_buffer.src = chunk_buffer
         in_buffer.size = len(chunk_buffer)
         in_buffer.pos = 0
 
         zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('could not decompress chunk 0: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError("could not decompress chunk 0: %s" % _zstd_error(zresult))
         elif zresult:
-            raise ZstdError('chunk 0 did not decompress full frame')
+            raise ZstdError("chunk 0 did not decompress full frame")
 
         # Special case of chain length of 1
         if len(frames) == 1:
@@ -2459,19 +2558,19 @@
         while i < len(frames):
             chunk = frames[i]
             if not isinstance(chunk, bytes_type):
-                raise ValueError('chunk %d must be bytes' % i)
+                raise ValueError("chunk %d must be bytes" % i)
 
             chunk_buffer = ffi.from_buffer(chunk)
             zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
             if lib.ZSTD_isError(zresult):
-                raise ValueError('chunk %d is not a valid zstd frame' % i)
+                raise ValueError("chunk %d is not a valid zstd frame" % i)
             elif zresult:
-                raise ValueError('chunk %d is too small to contain a zstd frame' % i)
+                raise ValueError("chunk %d is too small to contain a zstd frame" % i)
 
             if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
-                raise ValueError('chunk %d missing content size in frame' % i)
-
-            dest_buffer = ffi.new('char[]', params.frameContentSize)
+                raise ValueError("chunk %d missing content size in frame" % i)
+
+            dest_buffer = ffi.new("char[]", params.frameContentSize)
 
             out_buffer.dst = dest_buffer
             out_buffer.size = len(dest_buffer)
@@ -2483,10 +2582,11 @@
 
             zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('could not decompress chunk %d: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError(
+                    "could not decompress chunk %d: %s" % (i, _zstd_error(zresult))
+                )
             elif zresult:
-                raise ZstdError('chunk %d did not decompress full frame' % i)
+                raise ZstdError("chunk %d did not decompress full frame" % i)
 
             last_buffer = dest_buffer
             i += 1
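
decompress_content_dict_chain() walks a chain in which each frame was compressed using the previous frame's uncompressed output as a dictionary. A hedged sketch of the call shape; building a longer valid chain requires the matching compression-side setup:

    import zstandard as zstd

    dctx = zstd.ZstdDecompressor()
    frame0 = zstd.ZstdCompressor().compress(b"first revision")
    # A length-1 chain needs no dictionary; longer chains require each frame to
    # have been compressed with the previous frame's content as its dictionary.
    data = dctx.decompress_content_dict_chain([frame0])
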
@@ -2497,19 +2597,19 @@
         lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only)
 
         if self._max_window_size:
-            zresult = lib.ZSTD_DCtx_setMaxWindowSize(self._dctx,
-                                                     self._max_window_size)
+            zresult = lib.ZSTD_DCtx_setMaxWindowSize(self._dctx, self._max_window_size)
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('unable to set max window size: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError(
+                    "unable to set max window size: %s" % _zstd_error(zresult)
+                )
 
         zresult = lib.ZSTD_DCtx_setFormat(self._dctx, self._format)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError('unable to set decoding format: %s' %
-                            _zstd_error(zresult))
+            raise ZstdError("unable to set decoding format: %s" % _zstd_error(zresult))
 
         if self._dict_data and load_dict:
             zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict)
             if lib.ZSTD_isError(zresult):
-                raise ZstdError('unable to reference prepared dictionary: %s' %
-                                _zstd_error(zresult))
+                raise ZstdError(
+                    "unable to reference prepared dictionary: %s" % _zstd_error(zresult)
+                )
--- a/contrib/python-zstandard/zstd.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd.c	Tue Jan 21 13:14:51 2020 -0500
@@ -210,7 +210,7 @@
 	   We detect this mismatch here and refuse to load the module if this
 	   scenario is detected.
 	*/
-	if (ZSTD_VERSION_NUMBER != 10403 || ZSTD_versionNumber() != 10403) {
+	if (ZSTD_VERSION_NUMBER != 10404 || ZSTD_versionNumber() != 10404) {
 		PyErr_SetString(PyExc_ImportError, "zstd C API mismatch; Python bindings not compiled against expected zstd version");
 		return;
 	}
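
The C module refuses to import on a zstd version mismatch; the same invariant can be checked from Python. A sketch, assuming `zstandard.ZSTD_VERSION` reports the linked library version as the package documents:

    import zstandard as zstd

    # These bindings bundle and require zstd 1.4.4, per the check above.
    assert zstd.ZSTD_VERSION == (1, 4, 4)
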
--- a/contrib/python-zstandard/zstd/common/bitstream.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/common/bitstream.h	Tue Jan 21 13:14:51 2020 -0500
@@ -164,7 +164,7 @@
         _BitScanReverse ( &r, val );
         return (unsigned) r;
 #   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
-        return 31 - __builtin_clz (val);
+        return __builtin_clz (val) ^ 31;
 #   elif defined(__ICCARM__)    /* IAR Intrinsic */
         return 31 - __CLZ(val);
 #   else   /* Software version */
@@ -244,9 +244,9 @@
 {
     size_t const nbBytes = bitC->bitPos >> 3;
     assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    assert(bitC->ptr <= bitC->endPtr);
     MEM_writeLEST(bitC->ptr, bitC->bitContainer);
     bitC->ptr += nbBytes;
-    assert(bitC->ptr <= bitC->endPtr);
     bitC->bitPos &= 7;
     bitC->bitContainer >>= nbBytes*8;
 }
@@ -260,6 +260,7 @@
 {
     size_t const nbBytes = bitC->bitPos >> 3;
     assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    assert(bitC->ptr <= bitC->endPtr);
     MEM_writeLEST(bitC->ptr, bitC->bitContainer);
     bitC->ptr += nbBytes;
     if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
--- a/contrib/python-zstandard/zstd/common/compiler.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/common/compiler.h	Tue Jan 21 13:14:51 2020 -0500
@@ -61,6 +61,13 @@
 #  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
 #endif
 
+/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
+#if defined(__GNUC__)
+#  define UNUSED_ATTR __attribute__((unused))
+#else
+#  define UNUSED_ATTR
+#endif
+
 /* force no inlining */
 #ifdef _MSC_VER
 #  define FORCE_NOINLINE static __declspec(noinline)
@@ -127,9 +134,14 @@
     }                                     \
 }
 
-/* vectorization */
+/* vectorization
+ * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
 #if !defined(__clang__) && defined(__GNUC__)
-#  define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
+#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
+#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
+#  else
+#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
+#  endif
 #else
 #  define DONT_VECTORIZE
 #endif
--- a/contrib/python-zstandard/zstd/common/fse.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/common/fse.h	Tue Jan 21 13:14:51 2020 -0500
@@ -308,7 +308,7 @@
 *******************************************/
 /* FSE buffer bounds */
 #define FSE_NCOUNTBOUND 512
-#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_BLOCKBOUND(size) (size + (size>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
 #define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
 
 /* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
--- a/contrib/python-zstandard/zstd/common/fse_decompress.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/common/fse_decompress.c	Tue Jan 21 13:14:51 2020 -0500
@@ -52,7 +52,9 @@
 #define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
 
 /* check and forward error code */
+#ifndef CHECK_F
 #define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
+#endif
 
 
 /* **************************************************************
--- a/contrib/python-zstandard/zstd/common/mem.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/common/mem.h	Tue Jan 21 13:14:51 2020 -0500
@@ -47,6 +47,79 @@
 #define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
 MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
 
+/* detects whether we are being compiled under msan */
+#if defined (__has_feature)
+#  if __has_feature(memory_sanitizer)
+#    define MEMORY_SANITIZER 1
+#  endif
+#endif
+
+#if defined (MEMORY_SANITIZER)
+/* Not all platforms that support msan provide sanitizers/msan_interface.h.
+ * We therefore declare the functions we need ourselves, rather than trying to
+ * include the header file... */
+
+#include <stdint.h> /* intptr_t */
+
+/* Make memory region fully initialized (without changing its contents). */
+void __msan_unpoison(const volatile void *a, size_t size);
+
+/* Make memory region fully uninitialized (without changing its contents).
+   This is a legacy interface that does not update origin information. Use
+   __msan_allocated_memory() instead. */
+void __msan_poison(const volatile void *a, size_t size);
+
+/* Returns the offset of the first (at least partially) poisoned byte in the
+   memory range, or -1 if the whole range is good. */
+intptr_t __msan_test_shadow(const volatile void *x, size_t size);
+#endif
+
+/* detects whether we are being compiled under asan */
+#if defined (__has_feature)
+#  if __has_feature(address_sanitizer)
+#    define ADDRESS_SANITIZER 1
+#  endif
+#elif defined(__SANITIZE_ADDRESS__)
+#  define ADDRESS_SANITIZER 1
+#endif
+
+#if defined (ADDRESS_SANITIZER)
+/* Not all platforms that support asan provide sanitizers/asan_interface.h.
+ * We therefore declare the functions we need ourselves, rather than trying to
+ * include the header file... */
+
+/**
+ * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
+ *
+ * This memory must be previously allocated by your program. Instrumented
+ * code is forbidden from accessing addresses in this region until it is
+ * unpoisoned. This function is not guaranteed to poison the entire region -
+ * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
+ * alignment restrictions.
+ *
+ * \note This function is not thread-safe because no two threads can poison or
+ * unpoison memory in the same memory region simultaneously.
+ *
+ * \param addr Start of memory region.
+ * \param size Size of memory region. */
+void __asan_poison_memory_region(void const volatile *addr, size_t size);
+
+/**
+ * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
+ *
+ * This memory must be previously allocated by your program. Accessing
+ * addresses in this region is allowed until this region is poisoned again.
+ * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
+ * to ASan alignment restrictions.
+ *
+ * \note This function is not thread-safe because no two threads can
+ * poison or unpoison memory in the same memory region simultaneously.
+ *
+ * \param addr Start of memory region.
+ * \param size Size of memory region. */
+void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
+#endif
+
 
 /*-**************************************************************
 *  Basic Types
--- a/contrib/python-zstandard/zstd/common/pool.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/common/pool.c	Tue Jan 21 13:14:51 2020 -0500
@@ -127,9 +127,13 @@
     ctx->queueTail = 0;
     ctx->numThreadsBusy = 0;
     ctx->queueEmpty = 1;
-    (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
-    (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
-    (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
+    {
+        int error = 0;
+        error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
+        error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
+        error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
+        if (error) { POOL_free(ctx); return NULL; }
+    }
     ctx->shutdown = 0;
     /* Allocate space for the thread handles */
     ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
--- a/contrib/python-zstandard/zstd/common/threading.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/common/threading.c	Tue Jan 21 13:14:51 2020 -0500
@@ -14,6 +14,8 @@
  * This file will hold wrapper for systems, which do not support pthreads
  */
 
+#include "threading.h"
+
 /* create fake symbol to avoid empty translation unit warning */
 int g_ZSTD_threading_useless_symbol;
 
@@ -28,7 +30,6 @@
 /* ===  Dependencies  === */
 #include <process.h>
 #include <errno.h>
-#include "threading.h"
 
 
 /* ===  Implementation  === */
@@ -73,3 +74,47 @@
 }
 
 #endif   /* ZSTD_MULTITHREAD */
+
+#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)
+
+#include <stdlib.h>
+
+int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
+{
+    *mutex = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t));
+    if (!*mutex)
+        return 1;
+    return pthread_mutex_init(*mutex, attr);
+}
+
+int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
+{
+    if (!*mutex)
+        return 0;
+    {
+        int const ret = pthread_mutex_destroy(*mutex);
+        free(*mutex);
+        return ret;
+    }
+}
+
+int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
+{
+    *cond = (pthread_cond_t*)malloc(sizeof(pthread_cond_t));
+    if (!*cond)
+        return 1;
+    return pthread_cond_init(*cond, attr);
+}
+
+int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
+{
+    if (!*cond)
+        return 0;
+    {
+        int const ret = pthread_cond_destroy(*cond);
+        free(*cond);
+        return ret;
+    }
+}
+
+#endif
--- a/contrib/python-zstandard/zstd/common/threading.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/common/threading.h	Tue Jan 21 13:14:51 2020 -0500
@@ -13,6 +13,8 @@
 #ifndef THREADING_H_938743
 #define THREADING_H_938743
 
+#include "debug.h"
+
 #if defined (__cplusplus)
 extern "C" {
 #endif
@@ -75,10 +77,12 @@
  */
 
 
-#elif defined(ZSTD_MULTITHREAD)   /* posix assumed ; need a better detection method */
+#elif defined(ZSTD_MULTITHREAD)    /* posix assumed ; need a better detection method */
 /* ===   POSIX Systems   === */
 #  include <pthread.h>
 
+#if DEBUGLEVEL < 1
+
 #define ZSTD_pthread_mutex_t            pthread_mutex_t
 #define ZSTD_pthread_mutex_init(a, b)   pthread_mutex_init((a), (b))
 #define ZSTD_pthread_mutex_destroy(a)   pthread_mutex_destroy((a))
@@ -96,6 +100,33 @@
 #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
 #define ZSTD_pthread_join(a, b)         pthread_join((a),(b))
 
+#else /* DEBUGLEVEL >= 1 */
+
+/* Debug implementation of threading.
+ * In this implementation, mutexes and condition variables are pointers.
+ * This way, forgetting to init them makes the program crash, and forgetting
+ * to destroy them makes ASan report a leak.
+ */
+
+#define ZSTD_pthread_mutex_t            pthread_mutex_t*
+int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr);
+int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex);
+#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock(*(a))
+#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock(*(a))
+
+#define ZSTD_pthread_cond_t             pthread_cond_t*
+int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr);
+int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
+#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait(*(a), *(b))
+#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal(*(a))
+#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast(*(a))
+
+#define ZSTD_pthread_t                  pthread_t
+#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
+#define ZSTD_pthread_join(a, b)         pthread_join((a),(b))
+
+#endif
+
 #else  /* ZSTD_MULTITHREAD not defined */
 /* No multithreading support */
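A simplified, self-contained sketch of the trick the debug-threading comment above describes; dbg_mutex_t and its helpers are illustrative stand-ins for the ZSTD_pthread_* wrappers (build with -pthread, and with -fsanitize=address to see the leak report):

#include <pthread.h>
#include <stdlib.h>

typedef pthread_mutex_t* dbg_mutex_t;

static int dbg_mutex_init(dbg_mutex_t* m)
{
    *m = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t));
    if (!*m) return 1;
    return pthread_mutex_init(*m, NULL);
}

static int dbg_mutex_destroy(dbg_mutex_t* m)
{
    int ret;
    if (!*m) return 0;
    ret = pthread_mutex_destroy(*m);
    free(*m);
    *m = NULL;
    return ret;
}

int main(void)
{
    dbg_mutex_t m;
    if (dbg_mutex_init(&m)) return 1;
    pthread_mutex_lock(m);           /* an uninitialized m would crash here */
    pthread_mutex_unlock(m);
    return dbg_mutex_destroy(&m);    /* omit this and LeakSanitizer reports m */
}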
 
--- a/contrib/python-zstandard/zstd/common/zstd_internal.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/common/zstd_internal.h	Tue Jan 21 13:14:51 2020 -0500
@@ -197,79 +197,56 @@
 static void ZSTD_copy16(void* dst, const void* src) { memcpy(dst, src, 16); }
 #define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
 
-#define WILDCOPY_OVERLENGTH 8
-#define VECLEN 16
+#define WILDCOPY_OVERLENGTH 32
+#define WILDCOPY_VECLEN 16
 
 typedef enum {
     ZSTD_no_overlap,
-    ZSTD_overlap_src_before_dst,
+    ZSTD_overlap_src_before_dst
     /*  ZSTD_overlap_dst_before_src, */
 } ZSTD_overlap_e;
 
 /*! ZSTD_wildcopy() :
- *  custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
+ *  Custom version of memcpy(); may over-read/over-write up to WILDCOPY_OVERLENGTH bytes (even when length==0).
+ *  @param ovtype controls the overlap detection
+ *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
+ *           The src buffer must be before the dst buffer.
+ */
 MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
-void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
+void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
 {
     ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
     const BYTE* ip = (const BYTE*)src;
     BYTE* op = (BYTE*)dst;
     BYTE* const oend = op + length;
 
-    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
-    if (length < VECLEN || (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN)) {
-      do
-          COPY8(op, ip)
-      while (op < oend);
-    }
-    else {
-      if ((length & 8) == 0)
-        COPY8(op, ip);
-      do {
-        COPY16(op, ip);
-      }
-      while (op < oend);
-    }
-}
-
-/*! ZSTD_wildcopy_16min() :
- *  same semantics as ZSTD_wilcopy() except guaranteed to be able to copy 16 bytes at the start */
-MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
-void ZSTD_wildcopy_16min(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
-{
-    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = op + length;
+    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
 
-    assert(length >= 8);
-    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
-
-    if (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN) {
-      do
-          COPY8(op, ip)
-      while (op < oend);
-    }
-    else {
-      if ((length & 8) == 0)
-        COPY8(op, ip);
-      do {
+    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
+        /* Handle short offset copies. */
+        do {
+            COPY8(op, ip)
+        } while (op < oend);
+    } else {
+        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
+        /* Separate out the first two COPY16() calls because the copy length is
+         * almost certain to be short, so the branches have different
+         * probabilities.
+         * On gcc-9 unrolling once is +1.6%, twice is +2%, thrice is +1.8%.
+         * On clang-8 unrolling once is +1.4%, twice is +3.3%, thrice is +3%.
+         */
         COPY16(op, ip);
-      }
-      while (op < oend);
+        COPY16(op, ip);
+        if (op >= oend) return;
+        do {
+            COPY16(op, ip);
+            COPY16(op, ip);
+        }
+        while (op < oend);
     }
 }
 
-MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* should be faster for decoding, but strangely, not verified on all platform */
-{
-    const BYTE* ip = (const BYTE*)src;
-    BYTE* op = (BYTE*)dst;
-    BYTE* const oend = (BYTE*)dstEnd;
-    do
-        COPY8(op, ip)
-    while (op < oend);
-}
-
 
 /*-*******************************************
 *  Private declarations
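A toy model of ZSTD_wildcopy's fast path, and of why callers must keep WILDCOPY_OVERLENGTH bytes of slack past the end of the destination; toy_wildcopy is illustrative only and omits the real function's overlap handling:

#include <stddef.h>
#include <string.h>

/* Copy in fixed 16-byte chunks and let the copy run past `length`.
 * The final iteration may write up to 15 bytes past dst+length (16 when
 * length==0), so the destination buffer needs that much slack. */
static void toy_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
    unsigned char* op = (unsigned char*)dst;
    const unsigned char* ip = (const unsigned char*)src;
    unsigned char* const oend = op + length;
    do {
        memcpy(op, ip, 16);   /* fixed size: typically one vector load/store */
        op += 16;
        ip += 16;
    } while (op < oend);
}

The fixed-size memcpy is the point: the compiler can emit straight-line vector moves with no length-dependent branching, which is why zstd over-allocates every buffer this function writes into.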
@@ -323,7 +300,7 @@
         _BitScanReverse(&r, val);
         return (unsigned)r;
 #   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
-        return 31 - __builtin_clz(val);
+        return __builtin_clz (val) ^ 31;
 #   elif defined(__ICCARM__)    /* IAR Intrinsic */
         return 31 - __CLZ(val);
 #   else   /* Software version */
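The highbit change just above is behavior-preserving: for a nonzero 32-bit value, __builtin_clz returns a count in [0,31], and for any x in that range 31 - x == 31 ^ x, because 31 is 0b11111 and subtracting from an all-ones pattern never borrows. The XOR form matches what x86's BSR instruction already computes, so the compiler can drop the subtraction. A quick self-check (GCC/Clang only):

#include <assert.h>

int main(void)
{
    unsigned v;
    for (v = 1; v != 0; v <<= 1)    /* every power of two */
        assert((31 - __builtin_clz(v)) == (__builtin_clz(v) ^ 31));
    assert((31 - __builtin_clz(0x12345678u)) == (__builtin_clz(0x12345678u) ^ 31));
    return 0;
}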
--- a/contrib/python-zstandard/zstd/compress/zstd_compress.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress.c	Tue Jan 21 13:14:51 2020 -0500
@@ -42,15 +42,15 @@
 *  Context memory management
 ***************************************/
 struct ZSTD_CDict_s {
-    void* dictBuffer;
     const void* dictContent;
     size_t dictContentSize;
-    void* workspace;
-    size_t workspaceSize;
+    U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
+    ZSTD_cwksp workspace;
     ZSTD_matchState_t matchState;
     ZSTD_compressedBlockState_t cBlockState;
     ZSTD_customMem customMem;
     U32 dictID;
+    int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
 };  /* typedef'd to ZSTD_CDict within "zstd.h" */
 
 ZSTD_CCtx* ZSTD_createCCtx(void)
@@ -84,23 +84,26 @@
 
 ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
 {
-    ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
+    ZSTD_cwksp ws;
+    ZSTD_CCtx* cctx;
     if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
     if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
-    memset(workspace, 0, workspaceSize);   /* may be a bit generous, could memset be smaller ? */
+    ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+
+    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
+    if (cctx == NULL) {
+        return NULL;
+    }
+    memset(cctx, 0, sizeof(ZSTD_CCtx));
+    ZSTD_cwksp_move(&cctx->workspace, &ws);
     cctx->staticSize = workspaceSize;
-    cctx->workSpace = (void*)(cctx+1);
-    cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);
 
     /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
-    if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL;
-    assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
-    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace;
-    cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1;
-    {
-        void* const ptr = cctx->blockState.nextCBlock + 1;
-        cctx->entropyWorkspace = (U32*)ptr;
-    }
+    if (!ZSTD_cwksp_check_available(&cctx->workspace, HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
+    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(
+        &cctx->workspace, HUF_WORKSPACE_SIZE);
     cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
     return cctx;
 }
@@ -128,11 +131,11 @@
 {
     assert(cctx != NULL);
     assert(cctx->staticSize == 0);
-    ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
     ZSTD_clearAllDicts(cctx);
 #ifdef ZSTD_MULTITHREAD
     ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
 #endif
+    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
 }
 
 size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
@@ -140,8 +143,13 @@
     if (cctx==NULL) return 0;   /* support free on NULL */
     RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                     "not compatible with static CCtx");
-    ZSTD_freeCCtxContent(cctx);
-    ZSTD_free(cctx, cctx->customMem);
+    {
+        int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
+        ZSTD_freeCCtxContent(cctx);
+        if (!cctxInWorkspace) {
+            ZSTD_free(cctx, cctx->customMem);
+        }
+    }
     return 0;
 }
 
@@ -160,7 +168,9 @@
 size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
 {
     if (cctx==NULL) return 0;   /* support sizeof on NULL */
-    return sizeof(*cctx) + cctx->workSpaceSize
+    /* cctx may be in the workspace */
+    return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
+           + ZSTD_cwksp_sizeof(&cctx->workspace)
            + ZSTD_sizeof_localDict(cctx->localDict)
            + ZSTD_sizeof_mtctx(cctx);
 }
@@ -229,23 +239,23 @@
     RETURN_ERROR_IF(!cctxParams, GENERIC);
     FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
     memset(cctxParams, 0, sizeof(*cctxParams));
+    assert(!ZSTD_checkCParams(params.cParams));
     cctxParams->cParams = params.cParams;
     cctxParams->fParams = params.fParams;
     cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
-    assert(!ZSTD_checkCParams(params.cParams));
     return 0;
 }
 
 /* ZSTD_assignParamsToCCtxParams() :
  * params is presumed valid at this stage */
 static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
-        ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
+        const ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
 {
-    ZSTD_CCtx_params ret = cctxParams;
+    ZSTD_CCtx_params ret = *cctxParams;
+    assert(!ZSTD_checkCParams(params.cParams));
     ret.cParams = params.cParams;
     ret.fParams = params.fParams;
     ret.compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
-    assert(!ZSTD_checkCParams(params.cParams));
     return ret;
 }
 
@@ -378,7 +388,7 @@
     case ZSTD_c_forceAttachDict:
         ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy);
         bounds.lowerBound = ZSTD_dictDefaultAttach;
-        bounds.upperBound = ZSTD_dictForceCopy;       /* note : how to ensure at compile time that this is the highest value enum ? */
+        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? */
         return bounds;
 
     case ZSTD_c_literalCompressionMode:
@@ -392,6 +402,11 @@
         bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
         return bounds;
 
+    case ZSTD_c_srcSizeHint:
+        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
+        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
+        return bounds;
+
     default:
         {   ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
             return boundError;
@@ -448,6 +463,7 @@
     case ZSTD_c_forceAttachDict:
     case ZSTD_c_literalCompressionMode:
     case ZSTD_c_targetCBlockSize:
+    case ZSTD_c_srcSizeHint:
     default:
         return 0;
     }
@@ -494,6 +510,7 @@
     case ZSTD_c_ldmMinMatch:
     case ZSTD_c_ldmBucketSizeLog:
     case ZSTD_c_targetCBlockSize:
+    case ZSTD_c_srcSizeHint:
         break;
 
     default: RETURN_ERROR(parameter_unsupported);
@@ -517,33 +534,33 @@
         if (value) {  /* 0 : does not change current level */
             CCtxParams->compressionLevel = value;
         }
-        if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
+        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
         return 0;  /* return type (size_t) cannot represent negative values */
     }
 
     case ZSTD_c_windowLog :
         if (value!=0)   /* 0 => use default */
             BOUNDCHECK(ZSTD_c_windowLog, value);
-        CCtxParams->cParams.windowLog = value;
+        CCtxParams->cParams.windowLog = (U32)value;
         return CCtxParams->cParams.windowLog;
 
     case ZSTD_c_hashLog :
         if (value!=0)   /* 0 => use default */
             BOUNDCHECK(ZSTD_c_hashLog, value);
-        CCtxParams->cParams.hashLog = value;
+        CCtxParams->cParams.hashLog = (U32)value;
         return CCtxParams->cParams.hashLog;
 
     case ZSTD_c_chainLog :
         if (value!=0)   /* 0 => use default */
             BOUNDCHECK(ZSTD_c_chainLog, value);
-        CCtxParams->cParams.chainLog = value;
+        CCtxParams->cParams.chainLog = (U32)value;
         return CCtxParams->cParams.chainLog;
 
     case ZSTD_c_searchLog :
         if (value!=0)   /* 0 => use default */
             BOUNDCHECK(ZSTD_c_searchLog, value);
-        CCtxParams->cParams.searchLog = value;
-        return value;
+        CCtxParams->cParams.searchLog = (U32)value;
+        return (size_t)value;
 
     case ZSTD_c_minMatch :
         if (value!=0)   /* 0 => use default */
@@ -674,6 +691,12 @@
         CCtxParams->targetCBlockSize = value;
         return CCtxParams->targetCBlockSize;
 
+    case ZSTD_c_srcSizeHint :
+        if (value!=0)    /* 0 ==> default */
+            BOUNDCHECK(ZSTD_c_srcSizeHint, value);
+        CCtxParams->srcSizeHint = value;
+        return CCtxParams->srcSizeHint;
+
     default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
     }
 }
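From the caller's side, the new hint is set like any other advanced parameter. A hedged usage sketch, assuming a zstd that ships this patch (1.4.4 or later) with ZSTD_STATIC_LINKING_ONLY defined, since ZSTD_c_srcSizeHint lives in the experimental section of zstd.h; compress_with_hint is a hypothetical helper:

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_c_srcSizeHint is experimental API */
#include <zstd.h>

/* Compress one buffer while telling the context how large inputs typically
 * are, so compression parameters are chosen as if that were the real size. */
static size_t compress_with_hint(void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 int typicalSrcSize)
{
    size_t result;
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return (size_t)-1;   /* recognized by ZSTD_isError() */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_srcSizeHint, typicalSrcSize);
    result = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return result;   /* check with ZSTD_isError() */
}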
@@ -779,6 +802,9 @@
     case ZSTD_c_targetCBlockSize :
         *value = (int)CCtxParams->targetCBlockSize;
         break;
+    case ZSTD_c_srcSizeHint :
+        *value = (int)CCtxParams->srcSizeHint;
+        break;
     default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
     }
     return 0;
@@ -1029,7 +1055,11 @@
 ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
         const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
 {
-    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
+    ZSTD_compressionParameters cParams;
+    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
+      srcSizeHint = CCtxParams->srcSizeHint;
+    }
+    cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
     if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
     if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
     if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
@@ -1049,10 +1079,19 @@
     size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
     size_t const hSize = ((size_t)1) << cParams->hashLog;
     U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
-    size_t const h3Size = ((size_t)1) << hashLog3;
-    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
-    size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
-                          + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
+    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
+     * surrounded by redzones in ASAN. */
+    size_t const tableSpace = chainSize * sizeof(U32)
+                            + hSize * sizeof(U32)
+                            + h3Size * sizeof(U32);
+    size_t const optPotentialSpace =
+        ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
+      + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
+      + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
+      + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
+      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
     size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
                                 ? optPotentialSpace
                                 : 0;
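ZSTD_cwksp_alloc_size() belongs to the new cwksp workspace module introduced by this update (not visible in this hunk): it converts a payload size into the number of workspace bytes the allocation actually consumes. Purely as an illustration of the redzone accounting the comment above refers to, with invented names rather than zstd's own:

#include <stddef.h>

#if defined (ADDRESS_SANITIZER)
#  define REDZONE_SIZE 128   /* illustrative constant, not zstd's */
#else
#  define REDZONE_SIZE 0
#endif

/* Workspace bytes consumed by a `size`-byte allocation: the payload plus
 * a poisoned redzone on each side when building under ASan. */
static size_t wksp_alloc_size(size_t size)
{
    if (size == 0) return 0;
    return size + 2 * REDZONE_SIZE;
}

As the comment above notes, the match-state tables opt out of this accounting: they are carved out as one contiguous run and reused across resets, so they carry no per-table redzones.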
@@ -1069,20 +1108,23 @@
         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
         U32    const divider = (cParams.minMatch==3) ? 3 : 4;
         size_t const maxNbSeq = blockSize / divider;
-        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
-        size_t const entropySpace = HUF_WORKSPACE_SIZE;
-        size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
+        size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+                                + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
+                                + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
+        size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE);
+        size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
         size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);
 
         size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);
-        size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq);
+        size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq));
 
         size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace +
                                    matchStateSize + ldmSpace + ldmSeqSpace;
-
-        DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
-        DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
-        return sizeof(ZSTD_CCtx) + neededSpace;
+        size_t const cctxSpace = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx));
+
+        DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)cctxSpace);
+        DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
+        return cctxSpace + neededSpace;
     }
 }
 
@@ -1118,7 +1160,8 @@
         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
         size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
         size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
-        size_t const streamingSize = inBuffSize + outBuffSize;
+        size_t const streamingSize = ZSTD_cwksp_alloc_size(inBuffSize)
+                                   + ZSTD_cwksp_alloc_size(outBuffSize);
 
         return CCtxSize + streamingSize;
     }
@@ -1186,17 +1229,6 @@
     return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
 }
 
-
-
-static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
-                                  ZSTD_compressionParameters cParams2)
-{
-    return (cParams1.hashLog  == cParams2.hashLog)
-         & (cParams1.chainLog == cParams2.chainLog)
-         & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
-         & ((cParams1.minMatch==3) == (cParams2.minMatch==3));  /* hashlog3 space */
-}
-
 static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
                                     ZSTD_compressionParameters cParams2)
 {
@@ -1211,71 +1243,6 @@
     assert(cParams1.strategy     == cParams2.strategy);
 }
 
-/** The parameters are equivalent if ldm is not enabled in both sets or
- *  all the parameters are equivalent. */
-static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
-                                    ldmParams_t ldmParams2)
-{
-    return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
-           (ldmParams1.enableLdm == ldmParams2.enableLdm &&
-            ldmParams1.hashLog == ldmParams2.hashLog &&
-            ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
-            ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
-            ldmParams1.hashRateLog == ldmParams2.hashRateLog);
-}
-
-typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
-
-/* ZSTD_sufficientBuff() :
- * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
- * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
-static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
-                            size_t maxNbLit1,
-                            ZSTD_buffered_policy_e buffPol2,
-                            ZSTD_compressionParameters cParams2,
-                            U64 pledgedSrcSize)
-{
-    size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
-    size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
-    size_t const maxNbSeq2 = blockSize2 / ((cParams2.minMatch == 3) ? 3 : 4);
-    size_t const maxNbLit2 = blockSize2;
-    size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
-                (U32)neededBufferSize2, (U32)bufferSize1);
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
-                (U32)maxNbSeq2, (U32)maxNbSeq1);
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
-                (U32)maxNbLit2, (U32)maxNbLit1);
-    return (maxNbLit2 <= maxNbLit1)
-         & (maxNbSeq2 <= maxNbSeq1)
-         & (neededBufferSize2 <= bufferSize1);
-}
-
-/** Equivalence for resetCCtx purposes */
-static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
-                                 ZSTD_CCtx_params params2,
-                                 size_t buffSize1,
-                                 size_t maxNbSeq1, size_t maxNbLit1,
-                                 ZSTD_buffered_policy_e buffPol2,
-                                 U64 pledgedSrcSize)
-{
-    DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
-    if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
-      DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
-      return 0;
-    }
-    if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
-      DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
-      return 0;
-    }
-    if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
-                             params2.cParams, pledgedSrcSize)) {
-      DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
-      return 0;
-    }
-    return 1;
-}
-
 static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
 {
     int i;
@@ -1301,87 +1268,104 @@
     ms->dictMatchState = NULL;
 }
 
-/*! ZSTD_continueCCtx() :
- *  reuse CCtx without reset (note : requires no dictionary) */
-static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
-{
-    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
-    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
-    DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place");
-
-    cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
-    cctx->appliedParams = params;
-    cctx->blockState.matchState.cParams = params.cParams;
-    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
-    cctx->consumedSrcSize = 0;
-    cctx->producedCSize = 0;
-    if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
-        cctx->appliedParams.fParams.contentSizeFlag = 0;
-    DEBUGLOG(4, "pledged content size : %u ; flag : %u",
-        (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
-    cctx->stage = ZSTDcs_init;
-    cctx->dictID = 0;
-    if (params.ldmParams.enableLdm)
-        ZSTD_window_clear(&cctx->ldmState.window);
-    ZSTD_referenceExternalSequences(cctx, NULL, 0);
-    ZSTD_invalidateMatchState(&cctx->blockState.matchState);
-    ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
-    XXH64_reset(&cctx->xxhState, 0);
-    return 0;
-}
-
-typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
-
-typedef enum { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e;
-
-static void*
+/**
+ * Indicates whether this compression proceeds directly from the user-provided
+ * source buffer to the user-provided destination buffer (ZSTDb_not_buffered),
+ * or whether the context needs to buffer the input/output (ZSTDb_buffered).
+ */
+typedef enum {
+    ZSTDb_not_buffered,
+    ZSTDb_buffered
+} ZSTD_buffered_policy_e;
+
+/**
+ * Controls, for this matchState reset, whether the tables need to be cleared /
+ * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
+ * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
+ * subsequent operation will overwrite the table space anyway (e.g., copying
+ * the matchState contents in from a CDict).
+ */
+typedef enum {
+    ZSTDcrp_makeClean,
+    ZSTDcrp_leaveDirty
+} ZSTD_compResetPolicy_e;
+
+/**
+ * Controls, for this matchState reset, whether indexing can continue where it
+ * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
+ * (ZSTDirp_reset).
+ */
+typedef enum {
+    ZSTDirp_continue,
+    ZSTDirp_reset
+} ZSTD_indexResetPolicy_e;
+
+typedef enum {
+    ZSTD_resetTarget_CDict,
+    ZSTD_resetTarget_CCtx
+} ZSTD_resetTarget_e;
+
+static size_t
 ZSTD_reset_matchState(ZSTD_matchState_t* ms,
-                      void* ptr,
+                      ZSTD_cwksp* ws,
                 const ZSTD_compressionParameters* cParams,
-                      ZSTD_compResetPolicy_e const crp, ZSTD_resetTarget_e const forWho)
+                const ZSTD_compResetPolicy_e crp,
+                const ZSTD_indexResetPolicy_e forceResetIndex,
+                const ZSTD_resetTarget_e forWho)
 {
     size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
     size_t const hSize = ((size_t)1) << cParams->hashLog;
     U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
-    size_t const h3Size = ((size_t)1) << hashLog3;
-    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
-
-    assert(((size_t)ptr & 3) == 0);
+    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+
+    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
+    if (forceResetIndex == ZSTDirp_reset) {
+        memset(&ms->window, 0, sizeof(ms->window));
+        ms->window.dictLimit = 1;    /* start from 1, so that 1st position is valid */
+        ms->window.lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
+        ms->window.nextSrc = ms->window.base + 1;   /* see issue #1241 */
+        ZSTD_cwksp_mark_tables_dirty(ws);
+    }
 
     ms->hashLog3 = hashLog3;
-    memset(&ms->window, 0, sizeof(ms->window));
-    ms->window.dictLimit = 1;    /* start from 1, so that 1st position is valid */
-    ms->window.lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
-    ms->window.nextSrc = ms->window.base + 1;   /* see issue #1241 */
+
     ZSTD_invalidateMatchState(ms);
 
+    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
+
+    ZSTD_cwksp_clear_tables(ws);
+
+    DEBUGLOG(5, "reserving table space");
+    /* table Space */
+    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
+    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
+    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
+    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+                    "failed a workspace allocation in ZSTD_reset_matchState");
+
+    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
+    if (crp!=ZSTDcrp_leaveDirty) {
+        /* reset tables only */
+        ZSTD_cwksp_clean_tables(ws);
+    }
+
     /* opt parser space */
     if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
         DEBUGLOG(4, "reserving optimal parser space");
-        ms->opt.litFreq = (unsigned*)ptr;
-        ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
-        ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
-        ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
-        ptr = ms->opt.offCodeFreq + (MaxOff+1);
-        ms->opt.matchTable = (ZSTD_match_t*)ptr;
-        ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
-        ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
-        ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
+        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
+        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
+        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
+        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
+        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
+        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
     }
 
-    /* table Space */
-    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
-    assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
-    if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
-    ms->hashTable = (U32*)(ptr);
-    ms->chainTable = ms->hashTable + hSize;
-    ms->hashTable3 = ms->chainTable + chainSize;
-    ptr = ms->hashTable3 + h3Size;
-
     ms->cParams = *cParams;
 
-    assert(((size_t)ptr & 3) == 0);
-    return ptr;
+    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+                    "failed a workspace allocation in ZSTD_reset_matchState");
+
+    return 0;
 }
 
 /* ZSTD_indexTooCloseToMax() :
@@ -1397,13 +1381,6 @@
     return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
 }
 
-#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
-#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128  /* when workspace is continuously too large
-                                         * during at least this number of times,
-                                         * context's memory usage is considered wasteful,
-                                         * because it's sized to handle a worst case scenario which rarely happens.
-                                         * In which case, resize it down to free some memory */
-
 /*! ZSTD_resetCCtx_internal() :
     note : `params` are assumed fully validated at this stage */
 static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
@@ -1412,30 +1389,12 @@
                                       ZSTD_compResetPolicy_e const crp,
                                       ZSTD_buffered_policy_e const zbuff)
 {
+    ZSTD_cwksp* const ws = &zc->workspace;
     DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
                 (U32)pledgedSrcSize, params.cParams.windowLog);
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
 
-    if (crp == ZSTDcrp_continue) {
-        if (ZSTD_equivalentParams(zc->appliedParams, params,
-                                  zc->inBuffSize,
-                                  zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
-                                  zbuff, pledgedSrcSize) ) {
-            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> consider continue mode");
-            zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0);   /* if it was too large, it still is */
-            if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) {
-                DEBUGLOG(4, "continue mode confirmed (wLog1=%u, blockSize1=%zu)",
-                            zc->appliedParams.cParams.windowLog, zc->blockSize);
-                if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {
-                    /* prefer a reset, faster than a rescale */
-                    ZSTD_reset_matchState(&zc->blockState.matchState,
-                                           zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
-                                          &params.cParams,
-                                           crp, ZSTD_resetTarget_CCtx);
-                }
-                return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
-    }   }   }
-    DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
+    zc->isFirstBlock = 1;
 
     if (params.ldmParams.enableLdm) {
         /* Adjust long distance matching parameters */
@@ -1449,58 +1408,74 @@
         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
         U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;
         size_t const maxNbSeq = blockSize / divider;
-        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
+        size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+                                + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
+                                + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
         size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
         size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
         size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
         size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
-        void* ptr;   /* used to partition workSpace */
-
-        /* Check if workSpace is large enough, alloc a new one if needed */
-        {   size_t const entropySpace = HUF_WORKSPACE_SIZE;
-            size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
-            size_t const bufferSpace = buffInSize + buffOutSize;
+
+        ZSTD_indexResetPolicy_e needsIndexReset = ZSTDirp_continue;
+
+        if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {
+            needsIndexReset = ZSTDirp_reset;
+        }
+
+        ZSTD_cwksp_bump_oversized_duration(ws, 0);
+
+        /* Check if workspace is large enough, alloc a new one if needed */
+        {   size_t const cctxSpace = zc->staticSize ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
+            size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE);
+            size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
+            size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize);
             size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);
-            size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq);
-
-            size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
-                                       ldmSeqSpace + matchStateSize + tokenSpace +
-                                       bufferSpace;
-
-            int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
-            int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
-            int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
-            zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;
+            size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq));
+
+            size_t const neededSpace =
+                cctxSpace +
+                entropySpace +
+                blockStateSpace +
+                ldmSpace +
+                ldmSeqSpace +
+                matchStateSize +
+                tokenSpace +
+                bufferSpace;
+
+            int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
+            int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
 
             DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
                         neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
             DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
 
-            if (workSpaceTooSmall || workSpaceWasteful) {
-                DEBUGLOG(4, "Resize workSpaceSize from %zuKB to %zuKB",
-                            zc->workSpaceSize >> 10,
+            if (workspaceTooSmall || workspaceWasteful) {
+                DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
+                            ZSTD_cwksp_sizeof(ws) >> 10,
                             neededSpace >> 10);
 
                 RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
 
-                zc->workSpaceSize = 0;
-                ZSTD_free(zc->workSpace, zc->customMem);
-                zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
-                RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
-                zc->workSpaceSize = neededSpace;
-                zc->workSpaceOversizedDuration = 0;
-
+                needsIndexReset = ZSTDirp_reset;
+
+                ZSTD_cwksp_free(ws, zc->customMem);
+                FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem));
+
+                DEBUGLOG(5, "reserving object space");
                 /* Statically sized space.
                  * entropyWorkspace never moves,
                  * though prev/next block swap places */
-                assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
-                assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
-                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
-                zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1;
-                ptr = zc->blockState.nextCBlock + 1;
-                zc->entropyWorkspace = (U32*)ptr;
+                assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
+                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+                RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
+                zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
+                zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, HUF_WORKSPACE_SIZE);
+                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
         }   }
 
+        ZSTD_cwksp_clear(ws);
+
         /* init params */
         zc->appliedParams = params;
         zc->blockState.matchState.cParams = params.cParams;
@@ -1519,58 +1494,58 @@
 
         ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
 
-        ptr = ZSTD_reset_matchState(&zc->blockState.matchState,
-                                     zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
-                                    &params.cParams,
-                                     crp, ZSTD_resetTarget_CCtx);
+        /* ZSTD_wildcopy() is used to copy into the literals buffer,
+         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
+         */
+        zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
+        zc->seqStore.maxNbLit = blockSize;
+
+        /* buffers */
+        zc->inBuffSize = buffInSize;
+        zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
+        zc->outBuffSize = buffOutSize;
+        zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
+
+        /* ldm bucketOffsets table */
+        if (params.ldmParams.enableLdm) {
+            /* TODO: avoid memset? */
+            size_t const ldmBucketSize =
+                  ((size_t)1) << (params.ldmParams.hashLog -
+                                  params.ldmParams.bucketSizeLog);
+            zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize);
+            memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize);
+        }
+
+        /* sequences storage */
+        ZSTD_referenceExternalSequences(zc, NULL, 0);
+        zc->seqStore.maxNbSeq = maxNbSeq;
+        zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+        zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+        zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
+
+        FORWARD_IF_ERROR(ZSTD_reset_matchState(
+            &zc->blockState.matchState,
+            ws,
+            &params.cParams,
+            crp,
+            needsIndexReset,
+            ZSTD_resetTarget_CCtx));
 
         /* ldm hash table */
-        /* initialize bucketOffsets table later for pointer alignment */
         if (params.ldmParams.enableLdm) {
+            /* TODO: avoid memset? */
             size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
-            memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
-            assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
-            zc->ldmState.hashTable = (ldmEntry_t*)ptr;
-            ptr = zc->ldmState.hashTable + ldmHSize;
-            zc->ldmSequences = (rawSeq*)ptr;
-            ptr = zc->ldmSequences + maxNbLdmSeq;
+            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
+            memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
+            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
             zc->maxNbLdmSequences = maxNbLdmSeq;
 
             memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
-        }
-        assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
-
-        /* sequences storage */
-        zc->seqStore.maxNbSeq = maxNbSeq;
-        zc->seqStore.sequencesStart = (seqDef*)ptr;
-        ptr = zc->seqStore.sequencesStart + maxNbSeq;
-        zc->seqStore.llCode = (BYTE*) ptr;
-        zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
-        zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
-        zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
-        /* ZSTD_wildcopy() is used to copy into the literals buffer,
-         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
-         */
-        zc->seqStore.maxNbLit = blockSize;
-        ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;
-
-        /* ldm bucketOffsets table */
-        if (params.ldmParams.enableLdm) {
-            size_t const ldmBucketSize =
-                  ((size_t)1) << (params.ldmParams.hashLog -
-                                  params.ldmParams.bucketSizeLog);
-            memset(ptr, 0, ldmBucketSize);
-            zc->ldmState.bucketOffsets = (BYTE*)ptr;
-            ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
             ZSTD_window_clear(&zc->ldmState.window);
         }
-        ZSTD_referenceExternalSequences(zc, NULL, 0);
-
-        /* buffers */
-        zc->inBuffSize = buffInSize;
-        zc->inBuff = (char*)ptr;
-        zc->outBuffSize = buffOutSize;
-        zc->outBuff = zc->inBuff + buffInSize;
+
+        DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
 
         return 0;
     }
@@ -1604,15 +1579,15 @@
 };
 
 static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
-                                 ZSTD_CCtx_params params,
+                                 const ZSTD_CCtx_params* params,
                                  U64 pledgedSrcSize)
 {
     size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
     return ( pledgedSrcSize <= cutoff
           || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
-          || params.attachDictPref == ZSTD_dictForceAttach )
-        && params.attachDictPref != ZSTD_dictForceCopy
-        && !params.forceWindow; /* dictMatchState isn't correctly
+          || params->attachDictPref == ZSTD_dictForceAttach )
+        && params->attachDictPref != ZSTD_dictForceCopy
+        && !params->forceWindow; /* dictMatchState isn't correctly
                                  * handled in _enforceMaxDist */
 }
 
@@ -1630,8 +1605,8 @@
          * has its own tables. */
         params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
         params.cParams.windowLog = windowLog;
-        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
-                                ZSTDcrp_continue, zbuff);
+        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+                                                 ZSTDcrp_makeClean, zbuff));
         assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
     }
 
@@ -1679,30 +1654,36 @@
         /* Copy only compression parameters related to tables. */
         params.cParams = *cdict_cParams;
         params.cParams.windowLog = windowLog;
-        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
-                                ZSTDcrp_noMemset, zbuff);
+        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+                                                 ZSTDcrp_leaveDirty, zbuff));
         assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
         assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
         assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
     }
 
+    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
+
     /* copy tables */
     {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
         size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
-        size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
-        assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
-        assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
-        assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize);  /* chainTable must follow hashTable */
-        assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
-        memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
+
+        memcpy(cctx->blockState.matchState.hashTable,
+               cdict->matchState.hashTable,
+               hSize * sizeof(U32));
+        memcpy(cctx->blockState.matchState.chainTable,
+               cdict->matchState.chainTable,
+               chainSize * sizeof(U32));
     }
 
     /* Zero the hashTable3, since the cdict never fills it */
-    {   size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
+    {   int const h3log = cctx->blockState.matchState.hashLog3;
+        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
         assert(cdict->matchState.hashLog3 == 0);
         memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
     }
 
+    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
+
     /* copy dictionary offsets */
     {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
         ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
@@ -1724,7 +1705,7 @@
  * in-place. We decide here which strategy to use. */
 static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
                             const ZSTD_CDict* cdict,
-                            ZSTD_CCtx_params params,
+                            const ZSTD_CCtx_params* params,
                             U64 pledgedSrcSize,
                             ZSTD_buffered_policy_e zbuff)
 {
@@ -1734,10 +1715,10 @@
 
     if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
         return ZSTD_resetCCtx_byAttachingCDict(
-            cctx, cdict, params, pledgedSrcSize, zbuff);
+            cctx, cdict, *params, pledgedSrcSize, zbuff);
     } else {
         return ZSTD_resetCCtx_byCopyingCDict(
-            cctx, cdict, params, pledgedSrcSize, zbuff);
+            cctx, cdict, *params, pledgedSrcSize, zbuff);
     }
 }
 
@@ -1763,7 +1744,7 @@
         params.cParams = srcCCtx->appliedParams.cParams;
         params.fParams = fParams;
         ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
-                                ZSTDcrp_noMemset, zbuff);
+                                ZSTDcrp_leaveDirty, zbuff);
         assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
         assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
         assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
@@ -1771,16 +1752,27 @@
         assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
     }
 
+    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
+
     /* copy tables */
     {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
         size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
-        size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3;
-        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
-        assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
-        assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize);
-        memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
+        int const h3log = srcCCtx->blockState.matchState.hashLog3;
+        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
+
+        memcpy(dstCCtx->blockState.matchState.hashTable,
+               srcCCtx->blockState.matchState.hashTable,
+               hSize * sizeof(U32));
+        memcpy(dstCCtx->blockState.matchState.chainTable,
+               srcCCtx->blockState.matchState.chainTable,
+               chainSize * sizeof(U32));
+        memcpy(dstCCtx->blockState.matchState.hashTable3,
+               srcCCtx->blockState.matchState.hashTable3,
+               h3Size * sizeof(U32));
     }
 
+    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
+
     /* copy dictionary offsets */
     {
         const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
@@ -1831,6 +1823,20 @@
     int rowNb;
     assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
     assert(size < (1U<<31));   /* can be casted to int */
+
+#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+    /* To validate that the table re-use logic is sound, and that we don't
+     * access table space that we haven't cleaned, we re-"poison" the table
+     * space every time we mark it dirty.
+     *
+ * This function, however, is intended to operate on those dirty tables and
+ * re-clean them, so when it is used correctly we can unpoison the memory it
+ * operated on. This introduces a blind spot, though: if we then operate on
+ * memory that is __actually__ poisoned, we will not detect that. */
+    __msan_unpoison(table, size * sizeof(U32));
+#endif
+
     for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
         int column;
         for (column=0; column<ZSTD_ROWSIZE; column++) {
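The poison/unpoison pair used here is MSan's public interface, declared in <sanitizer/msan_interface.h>. A minimal sketch of the same dirty/clean discipline in isolation (requires a clang build with -fsanitize=memory):

#include <sanitizer/msan_interface.h>
#include <stdlib.h>

int main(void)
{
    size_t const n = 64;
    unsigned* const table = (unsigned*)malloc(n * sizeof(unsigned));
    size_t i;
    if (table == NULL) return 1;
    for (i = 0; i < n; i++) table[i] = (unsigned)i;

    /* Mark the table "dirty": reads would now be reported as uses of
     * uninitialized memory. */
    __msan_poison(table, n * sizeof(unsigned));

    /* A routine that deliberately consumes dirty entries (like
     * ZSTD_reduceTable above) first lifts the poison over its input. */
    __msan_unpoison(table, n * sizeof(unsigned));
    for (i = 0; i < n; i++) table[i] = table[i] > 16 ? table[i] - 16 : 0;

    free(table);
    return 0;
}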
@@ -1938,7 +1944,7 @@
                                 ZSTD_entropyCTables_t* nextEntropy,
                           const ZSTD_CCtx_params* cctxParams,
                                 void* dst, size_t dstCapacity,
-                                void* workspace, size_t wkspSize,
+                                void* entropyWorkspace, size_t entropyWkspSize,
                           const int bmi2)
 {
     const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
@@ -1971,7 +1977,7 @@
                                     ZSTD_disableLiteralsCompression(cctxParams),
                                     op, dstCapacity,
                                     literals, litSize,
-                                    workspace, wkspSize,
+                                    entropyWorkspace, entropyWkspSize,
                                     bmi2);
         FORWARD_IF_ERROR(cSize);
         assert(cSize <= dstCapacity);
@@ -1981,12 +1987,17 @@
     /* Sequences Header */
     RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                     dstSize_tooSmall);
-    if (nbSeq < 0x7F)
+    if (nbSeq < 128) {
         *op++ = (BYTE)nbSeq;
-    else if (nbSeq < LONGNBSEQ)
-        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
-    else
-        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+    } else if (nbSeq < LONGNBSEQ) {
+        op[0] = (BYTE)((nbSeq>>8) + 0x80);
+        op[1] = (BYTE)nbSeq;
+        op+=2;
+    } else {
+        op[0]=0xFF;
+        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
+        op+=3;
+    }
     assert(op <= oend);
     if (nbSeq==0) {
         /* Copy the old tables over as if we repeated them */
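The restructured branches above keep the on-wire encoding unchanged: one byte for nbSeq < 128, two bytes with the top bit set for nbSeq < LONGNBSEQ (0x7F00 in zstd), else a 0xFF escape followed by a little-endian 16-bit remainder. A standalone encoder for the same scheme, with MEM_writeLE16 replaced by explicit byte stores:

typedef unsigned char BYTE;
#define LONGNBSEQ 0x7F00

/* Returns the number of header bytes written (1, 2 or 3).
 * Assumes nbSeq - LONGNBSEQ fits in 16 bits, as it does in zstd. */
static int writeSeqCount(BYTE* op, unsigned nbSeq)
{
    if (nbSeq < 128) {
        op[0] = (BYTE)nbSeq;                      /* 1-byte form */
        return 1;
    }
    if (nbSeq < LONGNBSEQ) {
        op[0] = (BYTE)((nbSeq >> 8) + 0x80);      /* top bit flags 2-byte form */
        op[1] = (BYTE)nbSeq;
        return 2;
    }
    op[0] = (BYTE)0xFF;                           /* escape to 3-byte form */
    op[1] = (BYTE)((nbSeq - LONGNBSEQ) & 0xFF);   /* little-endian 16 bits */
    op[2] = (BYTE)(((nbSeq - LONGNBSEQ) >> 8) & 0xFF);
    return 3;
}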
@@ -2002,7 +2013,7 @@
     ZSTD_seqToCodes(seqStorePtr);
     /* build CTable for Literal Lengths */
     {   unsigned max = MaxLL;
-        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
+        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
         DEBUGLOG(5, "Building LL table");
         nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
         LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
@@ -2012,10 +2023,14 @@
                                         ZSTD_defaultAllowed, strategy);
         assert(set_basic < set_compressed && set_rle < set_compressed);
         assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
-        {   size_t const countSize = ZSTD_buildCTable(op, (size_t)(oend - op), CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
-                                                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
-                                                    prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
-                                                    workspace, wkspSize);
+        {   size_t const countSize = ZSTD_buildCTable(
+                op, (size_t)(oend - op),
+                CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
+                count, max, llCodeTable, nbSeq,
+                LL_defaultNorm, LL_defaultNormLog, MaxLL,
+                prevEntropy->fse.litlengthCTable,
+                sizeof(prevEntropy->fse.litlengthCTable),
+                entropyWorkspace, entropyWkspSize);
             FORWARD_IF_ERROR(countSize);
             if (LLtype == set_compressed)
                 lastNCount = op;
@@ -2024,7 +2039,8 @@
     }   }
     /* build CTable for Offsets */
     {   unsigned max = MaxOff;
-        size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
+        size_t const mostFrequent = HIST_countFast_wksp(
+            count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
         /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
         ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
         DEBUGLOG(5, "Building OF table");
@@ -2035,10 +2051,14 @@
                                         OF_defaultNorm, OF_defaultNormLog,
                                         defaultPolicy, strategy);
         assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
-        {   size_t const countSize = ZSTD_buildCTable(op, (size_t)(oend - op), CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
-                                                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
-                                                    prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
-                                                    workspace, wkspSize);
+        {   size_t const countSize = ZSTD_buildCTable(
+                op, (size_t)(oend - op),
+                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
+                count, max, ofCodeTable, nbSeq,
+                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+                prevEntropy->fse.offcodeCTable,
+                sizeof(prevEntropy->fse.offcodeCTable),
+                entropyWorkspace, entropyWkspSize);
             FORWARD_IF_ERROR(countSize);
             if (Offtype == set_compressed)
                 lastNCount = op;
@@ -2047,7 +2067,8 @@
     }   }
     /* build CTable for MatchLengths */
     {   unsigned max = MaxML;
-        size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
+        size_t const mostFrequent = HIST_countFast_wksp(
+            count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
         DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
         nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
         MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
@@ -2056,10 +2077,14 @@
                                         ML_defaultNorm, ML_defaultNormLog,
                                         ZSTD_defaultAllowed, strategy);
         assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
-        {   size_t const countSize = ZSTD_buildCTable(op, (size_t)(oend - op), CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
-                                                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
-                                                    prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
-                                                    workspace, wkspSize);
+        {   size_t const countSize = ZSTD_buildCTable(
+                op, (size_t)(oend - op),
+                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
+                count, max, mlCodeTable, nbSeq,
+                ML_defaultNorm, ML_defaultNormLog, MaxML,
+                prevEntropy->fse.matchlengthCTable,
+                sizeof(prevEntropy->fse.matchlengthCTable),
+                entropyWorkspace, entropyWkspSize);
             FORWARD_IF_ERROR(countSize);
             if (MLtype == set_compressed)
                 lastNCount = op;
@@ -2107,13 +2132,13 @@
                        const ZSTD_CCtx_params* cctxParams,
                              void* dst, size_t dstCapacity,
                              size_t srcSize,
-                             void* workspace, size_t wkspSize,
+                             void* entropyWorkspace, size_t entropyWkspSize,
                              int bmi2)
 {
     size_t const cSize = ZSTD_compressSequences_internal(
                             seqStorePtr, prevEntropy, nextEntropy, cctxParams,
                             dst, dstCapacity,
-                            workspace, wkspSize, bmi2);
+                            entropyWorkspace, entropyWkspSize, bmi2);
     if (cSize == 0) return 0;
     /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
      * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
@@ -2264,11 +2289,99 @@
     return ZSTDbss_compress;
 }
 
+static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
+{
+    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
+    const seqDef* seqs = seqStore->sequencesStart;
+    size_t seqsSize = seqStore->sequences - seqs;
+
+    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
+    size_t i; size_t position; int repIdx;
+
+    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
+    for (i = 0, position = 0; i < seqsSize; ++i) {
+        outSeqs[i].offset = seqs[i].offset;
+        outSeqs[i].litLength = seqs[i].litLength;
+        outSeqs[i].matchLength = seqs[i].matchLength + MINMATCH;
+
+        if (i == seqStore->longLengthPos) {
+            if (seqStore->longLengthID == 1) {
+                outSeqs[i].litLength += 0x10000;
+            } else if (seqStore->longLengthID == 2) {
+                outSeqs[i].matchLength += 0x10000;
+            }
+        }
+
+        if (outSeqs[i].offset <= ZSTD_REP_NUM) {
+            outSeqs[i].rep = outSeqs[i].offset;
+            repIdx = (unsigned int)i - outSeqs[i].offset;
+
+            if (outSeqs[i].litLength == 0) {
+                if (outSeqs[i].offset < 3) {
+                    --repIdx;
+                } else {
+                    repIdx = (unsigned int)i - 1;
+                }
+                ++outSeqs[i].rep;
+            }
+            assert(repIdx >= -3);
+            outSeqs[i].offset = repIdx >= 0 ? outSeqs[repIdx].offset : repStartValue[-repIdx - 1];
+            if (outSeqs[i].rep == 4) {
+                --outSeqs[i].offset;
+            }
+        } else {
+            outSeqs[i].offset -= ZSTD_REP_NUM;
+        }
+
+        position += outSeqs[i].litLength;
+        outSeqs[i].matchPos = (unsigned int)position;
+        position += outSeqs[i].matchLength;
+    }
+    zc->seqCollector.seqIndex += seqsSize;
+}
+
+size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+    size_t outSeqsSize, const void* src, size_t srcSize)
+{
+    const size_t dstCapacity = ZSTD_compressBound(srcSize);
+    void* dst = ZSTD_malloc(dstCapacity, ZSTD_defaultCMem);
+    SeqCollector seqCollector;
+
+    RETURN_ERROR_IF(dst == NULL, memory_allocation);
+
+    seqCollector.collectSequences = 1;
+    seqCollector.seqStart = outSeqs;
+    seqCollector.seqIndex = 0;
+    seqCollector.maxSequences = outSeqsSize;
+    zc->seqCollector = seqCollector;
+
+    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
+    ZSTD_free(dst, ZSTD_defaultCMem);
+    return zc->seqCollector.seqIndex;
+}
+
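ZSTD_getSequences above compresses into a throwaway buffer purely to drive the sequence collector. A hedged usage sketch; the sequence-count bound below is an assumption (minimum match length is 3 bytes), not an API guarantee:

#include <stdlib.h>
#include <zstd.h>

/* Collect the sequences zstd would emit for src, discarding the output. */
size_t collect(const void* src, size_t srcSize) {
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const maxSeqs = srcSize / 3 + 1;   /* assumed upper bound */
    ZSTD_Sequence* const seqs =
        (ZSTD_Sequence*)malloc(maxSeqs * sizeof(ZSTD_Sequence));
    size_t const nbSeqs = ZSTD_getSequences(cctx, seqs, maxSeqs, src, srcSize);
    /* each entry carries offset, litLength, matchLength, matchPos, rep */
    free(seqs);
    ZSTD_freeCCtx(cctx);
    return nbSeqs;
}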
+/* Returns true if the given block is a RLE block */
+static int ZSTD_isRLE(const BYTE *ip, size_t length) {
+    size_t i;
+    if (length < 2) return 1;
+    for (i = 1; i < length; ++i) {
+        if (ip[0] != ip[i]) return 0;
+    }
+    return 1;
+}
+
 static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                                         void* dst, size_t dstCapacity,
-                                        const void* src, size_t srcSize)
+                                        const void* src, size_t srcSize, U32 frame)
 {
+    /* This is the upper bound for the length of an RLE block.
+     * It isn't the exact upper bound; finding the real threshold
+     * needs further investigation.
+     */
+    const U32 rleMaxLength = 25;
     size_t cSize;
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
     DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
                 (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
                 (unsigned)zc->blockState.matchState.nextToUpdate);
@@ -2278,6 +2391,11 @@
         if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
     }
 
+    if (zc->seqCollector.collectSequences) {
+        ZSTD_copyBlockSequences(zc);
+        return 0;
+    }
+
     /* encode sequences and literals */
     cSize = ZSTD_compressSequences(&zc->seqStore,
             &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
@@ -2287,8 +2405,21 @@
             zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
             zc->bmi2);
 
+    if (frame &&
+        /* We don't want to emit our first block as an RLE block even if it qualifies,
+         * because doing so would cause the decoder (CLI only) to throw a
+         * "should consume all input" error. This is only an issue for zstd <= v1.4.3.
+         */
+        !zc->isFirstBlock &&
+        cSize < rleMaxLength &&
+        ZSTD_isRLE(ip, srcSize))
+    {
+        cSize = 1;
+        op[0] = ip[0];
+    }
+
 out:
-    if (!ZSTD_isError(cSize) && cSize != 0) {
+    if (!ZSTD_isError(cSize) && cSize > 1) {
         /* confirm repcodes and entropy tables when emitting a compressed block */
         ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
         zc->blockState.prevCBlock = zc->blockState.nextCBlock;
@@ -2305,7 +2436,11 @@
 }
 
 
-static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, void const* ip, void const* iend)
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
+                                         ZSTD_cwksp* ws,
+                                         ZSTD_CCtx_params const* params,
+                                         void const* ip,
+                                         void const* iend)
 {
     if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
         U32 const maxDist = (U32)1 << params->cParams.windowLog;
@@ -2314,7 +2449,9 @@
         ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
         ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
         ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+        ZSTD_cwksp_mark_tables_dirty(ws);
         ZSTD_reduceIndex(ms, params, correction);
+        ZSTD_cwksp_mark_tables_clean(ws);
         if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
         else ms->nextToUpdate -= correction;
         /* invalidate dictionaries on overflow correction */
@@ -2323,7 +2460,6 @@
     }
 }
 
-
 /*! ZSTD_compress_frameChunk() :
 *   Compress a chunk of data into one or multiple blocks.
 *   All blocks will be terminated, all input will be consumed.
@@ -2357,7 +2493,8 @@
                         "not enough space to store compressed block");
         if (remaining < blockSize) blockSize = remaining;
 
-        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, ip, ip + blockSize);
+        ZSTD_overflowCorrectIfNeeded(
+            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
         ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
 
         /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
@@ -2365,15 +2502,16 @@
 
         {   size_t cSize = ZSTD_compressBlock_internal(cctx,
                                 op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
-                                ip, blockSize);
+                                ip, blockSize, 1 /* frame */);
             FORWARD_IF_ERROR(cSize);
-
             if (cSize == 0) {  /* block is not compressible */
                 cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
                 FORWARD_IF_ERROR(cSize);
             } else {
-                U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
-                MEM_writeLE24(op, cBlockHeader24);
+                const U32 cBlockHeader = cSize == 1 ?
+                    lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
+                    lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+                MEM_writeLE24(op, cBlockHeader);
                 cSize += ZSTD_blockHeaderSize;
             }
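The header written above packs three fields into 24 bits: bit 0 is lastBlock, bits 1-2 the block type, and bits 3-23 the size (cSize for a compressed block, the original blockSize for an RLE block). A worked example, assuming zstd's blockType_e values (bt_rle == 1, bt_compressed == 2):

unsigned const lastBlock = 1;
unsigned const blockType = 2;        /* bt_compressed; an RLE block uses 1 */
unsigned const size      = 1234;     /* cSize here; blockSize for RLE */
unsigned const header24  = lastBlock + (blockType << 1) + (size << 3);
/* header24 == 1 + 4 + 9872 == 9877 == 0x002695; emitted via MEM_writeLE24 */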
 
@@ -2383,6 +2521,7 @@
             op += cSize;
             assert(dstCapacity >= cSize);
             dstCapacity -= cSize;
+            cctx->isFirstBlock = 0;
             DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                         (unsigned)cSize);
     }   }
@@ -2393,25 +2532,25 @@
 
 
 static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
-                                    ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
+                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
 {   BYTE* const op = (BYTE*)dst;
     U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
-    U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
-    U32   const checksumFlag = params.fParams.checksumFlag>0;
-    U32   const windowSize = (U32)1 << params.cParams.windowLog;
-    U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
-    BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
-    U32   const fcsCode = params.fParams.contentSizeFlag ?
+    U32   const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
+    U32   const checksumFlag = params->fParams.checksumFlag>0;
+    U32   const windowSize = (U32)1 << params->cParams.windowLog;
+    U32   const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
+    BYTE  const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+    U32   const fcsCode = params->fParams.contentSizeFlag ?
                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
     BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
     size_t pos=0;
 
-    assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
+    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
     RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);
     DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
-                !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
-
-    if (params.format == ZSTD_f_zstd1) {
+                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
+
+    if (params->format == ZSTD_f_zstd1) {
         MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
         pos = 4;
     }
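For reference, the frameHeaderDescriptionByte assembled above lays out as: bits 0-1 dictIDSizeCode, bit 2 checksumFlag, bit 5 singleSegment, bits 6-7 fcsCode (bits 3-4 unused/reserved). A worked example with assumed field values:

BYTE const fhd = (BYTE)( 2           /* 2-byte dictID       (bits 0-1) */
                       + (1 << 2)    /* checksum present    (bit 2)    */
                       + (0 << 5)    /* windowed frame      (bit 5)    */
                       + (2 << 6));  /* 4-byte content size (bits 6-7) */
/* fhd == 2 + 4 + 0 + 128 == 0x86 */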
@@ -2477,7 +2616,7 @@
                     "missing init (ZSTD_compressBegin)");
 
     if (frame && (cctx->stage==ZSTDcs_init)) {
-        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
+        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
                                        cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
         FORWARD_IF_ERROR(fhSize);
         assert(fhSize <= dstCapacity);
@@ -2497,13 +2636,15 @@
 
     if (!frame) {
         /* overflow check and correction for block mode */
-        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, src, (BYTE const*)src + srcSize);
+        ZSTD_overflowCorrectIfNeeded(
+            ms, &cctx->workspace, &cctx->appliedParams,
+            src, (BYTE const*)src + srcSize);
     }
 
     DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
     {   size_t const cSize = frame ?
                              ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
-                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
+                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
         FORWARD_IF_ERROR(cSize);
         cctx->consumedSrcSize += srcSize;
         cctx->producedCSize += (cSize + fhSize);
@@ -2550,6 +2691,7 @@
  *  @return : 0, or an error code
  */
 static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+                                         ZSTD_cwksp* ws,
                                          ZSTD_CCtx_params const* params,
                                          const void* src, size_t srcSize,
                                          ZSTD_dictTableLoadMethod_e dtlm)
@@ -2570,7 +2712,7 @@
         size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
         const BYTE* const ichunk = ip + chunk;
 
-        ZSTD_overflowCorrectIfNeeded(ms, params, ip, ichunk);
+        ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
 
         switch(params->cParams.strategy)
         {
@@ -2629,10 +2771,11 @@
 /*! ZSTD_loadZstdDictionary() :
  * @return : dictID, or an error code
  *  assumptions : magic number supposed already checked
- *                dictSize supposed > 8
+ *                dictSize supposed >= 8
  */
 static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                       ZSTD_matchState_t* ms,
+                                      ZSTD_cwksp* ws,
                                       ZSTD_CCtx_params const* params,
                                       const void* dict, size_t dictSize,
                                       ZSTD_dictTableLoadMethod_e dtlm,
@@ -2645,7 +2788,7 @@
     size_t dictID;
 
     ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
-    assert(dictSize > 8);
+    assert(dictSize >= 8);
     assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
 
     dictPtr += 4;   /* skip magic number */
@@ -2728,7 +2871,8 @@
         bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
         bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
         bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
-        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
+        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
+            ms, ws, params, dictPtr, dictContentSize, dtlm));
         return dictID;
     }
 }
@@ -2738,6 +2882,7 @@
 static size_t
 ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
                                ZSTD_matchState_t* ms,
+                               ZSTD_cwksp* ws,
                          const ZSTD_CCtx_params* params,
                          const void* dict, size_t dictSize,
                                ZSTD_dictContentType_e dictContentType,
@@ -2745,27 +2890,35 @@
                                void* workspace)
 {
     DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
-    if ((dict==NULL) || (dictSize<=8)) return 0;
+    if ((dict==NULL) || (dictSize<8)) {
+        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
+        return 0;
+    }
 
     ZSTD_reset_compressedBlockState(bs);
 
     /* dict restricted modes */
     if (dictContentType == ZSTD_dct_rawContent)
-        return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
+        return ZSTD_loadDictionaryContent(ms, ws, params, dict, dictSize, dtlm);
 
     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
         if (dictContentType == ZSTD_dct_auto) {
             DEBUGLOG(4, "raw content dictionary detected");
-            return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
+            return ZSTD_loadDictionaryContent(
+                ms, ws, params, dict, dictSize, dtlm);
         }
         RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
         assert(0);   /* impossible */
     }
 
     /* dict as full zstd dictionary */
-    return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
+    return ZSTD_loadZstdDictionary(
+        bs, ms, ws, params, dict, dictSize, dtlm, workspace);
 }
 
+#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
+#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6)
+
 /*! ZSTD_compressBegin_internal() :
  * @return : 0, or an error code */
 static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
@@ -2773,23 +2926,34 @@
                                     ZSTD_dictContentType_e dictContentType,
                                     ZSTD_dictTableLoadMethod_e dtlm,
                                     const ZSTD_CDict* cdict,
-                                    ZSTD_CCtx_params params, U64 pledgedSrcSize,
+                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
                                     ZSTD_buffered_policy_e zbuff)
 {
-    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
+    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
     /* params are supposed to be fully validated at this point */
-    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-
-    if (cdict && cdict->dictContentSize>0) {
+    if ( (cdict)
+      && (cdict->dictContentSize > 0)
+      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+        || cdict->compressionLevel == 0)
+      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
         return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
     }
 
-    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
-                                     ZSTDcrp_continue, zbuff) );
-    {   size_t const dictID = ZSTD_compress_insertDictionary(
-                cctx->blockState.prevCBlock, &cctx->blockState.matchState,
-                &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
+    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
+                                     ZSTDcrp_makeClean, zbuff) );
+    {   size_t const dictID = cdict ?
+                ZSTD_compress_insertDictionary(
+                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+                        &cctx->workspace, params, cdict->dictContent, cdict->dictContentSize,
+                        dictContentType, dtlm, cctx->entropyWorkspace)
+              : ZSTD_compress_insertDictionary(
+                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+                        &cctx->workspace, params, dict, dictSize,
+                        dictContentType, dtlm, cctx->entropyWorkspace);
         FORWARD_IF_ERROR(dictID);
         assert(dictID <= UINT_MAX);
         cctx->dictID = (U32)dictID;
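Condensed, the new cdict branch above applies this heuristic: reuse the parameters baked into the cdict only when the source is small in absolute terms, small relative to the dictionary, of unknown size, or when the cdict came through the advanced API (compressionLevel == 0), and the caller hasn't forced a dictionary reload. A hedged restatement (shouldUseCDictParams is a hypothetical helper, not zstd code):

static int shouldUseCDictParams(const ZSTD_CDict* cdict,
                                const ZSTD_CCtx_params* params,
                                unsigned long long pledgedSrcSize) {
    return cdict != NULL
        && cdict->dictContentSize > 0
        && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
          || pledgedSrcSize < cdict->dictContentSize
                              * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
          || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
          || cdict->compressionLevel == 0 )
        && params->attachDictPref != ZSTD_dictForceLoad;
}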
@@ -2802,12 +2966,12 @@
                                     ZSTD_dictContentType_e dictContentType,
                                     ZSTD_dictTableLoadMethod_e dtlm,
                                     const ZSTD_CDict* cdict,
-                                    ZSTD_CCtx_params params,
+                                    const ZSTD_CCtx_params* params,
                                     unsigned long long pledgedSrcSize)
 {
-    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
+    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
     /* compression parameters verification and optimization */
-    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
+    FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) );
     return ZSTD_compressBegin_internal(cctx,
                                        dict, dictSize, dictContentType, dtlm,
                                        cdict,
@@ -2822,21 +2986,21 @@
                                    ZSTD_parameters params, unsigned long long pledgedSrcSize)
 {
     ZSTD_CCtx_params const cctxParams =
-            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+            ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
     return ZSTD_compressBegin_advanced_internal(cctx,
                                             dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                             NULL /*cdict*/,
-                                            cctxParams, pledgedSrcSize);
+                                            &cctxParams, pledgedSrcSize);
 }
 
 size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
 {
     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
     ZSTD_CCtx_params const cctxParams =
-            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+            ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
     DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
     return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
-                                       cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
+                                       &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
 }
 
 size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
@@ -2859,7 +3023,7 @@
 
     /* special case : empty frame */
     if (cctx->stage == ZSTDcs_init) {
-        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
+        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
         FORWARD_IF_ERROR(fhSize);
         dstCapacity -= fhSize;
         op += fhSize;
@@ -2920,13 +3084,13 @@
                                       ZSTD_parameters params)
 {
     ZSTD_CCtx_params const cctxParams =
-            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+            ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
     DEBUGLOG(4, "ZSTD_compress_internal");
     return ZSTD_compress_advanced_internal(cctx,
                                            dst, dstCapacity,
                                            src, srcSize,
                                            dict, dictSize,
-                                           cctxParams);
+                                           &cctxParams);
 }
 
 size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
@@ -2950,7 +3114,7 @@
         void* dst, size_t dstCapacity,
         const void* src, size_t srcSize,
         const void* dict,size_t dictSize,
-        ZSTD_CCtx_params params)
+        const ZSTD_CCtx_params* params)
 {
     DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
     FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
@@ -2966,9 +3130,9 @@
                                int compressionLevel)
 {
     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0);
-    ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+    ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
     assert(params.fParams.contentSizeFlag == 1);
-    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
+    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
 }
 
 size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
@@ -3003,8 +3167,11 @@
         ZSTD_dictLoadMethod_e dictLoadMethod)
 {
     DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
-    return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
-           + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+         + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
+         + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
 }
 
 size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
@@ -3017,7 +3184,9 @@
 {
     if (cdict==NULL) return 0;   /* support sizeof on NULL */
     DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
-    return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
+    /* cdict may be in the workspace */
+    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
+        + ZSTD_cwksp_sizeof(&cdict->workspace);
 }
 
 static size_t ZSTD_initCDict_internal(
@@ -3031,28 +3200,29 @@
     assert(!ZSTD_checkCParams(cParams));
     cdict->matchState.cParams = cParams;
     if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
-        cdict->dictBuffer = NULL;
         cdict->dictContent = dictBuffer;
     } else {
-        void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
-        cdict->dictBuffer = internalBuffer;
+        void* internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
+        RETURN_ERROR_IF(!internalBuffer, memory_allocation);
         cdict->dictContent = internalBuffer;
-        RETURN_ERROR_IF(!internalBuffer, memory_allocation);
         memcpy(internalBuffer, dictBuffer, dictSize);
     }
     cdict->dictContentSize = dictSize;
 
+    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
+
     /* Reset the state to no dictionary */
     ZSTD_reset_compressedBlockState(&cdict->cBlockState);
-    {   void* const end = ZSTD_reset_matchState(&cdict->matchState,
-                            (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
-                            &cParams,
-                             ZSTDcrp_continue, ZSTD_resetTarget_CDict);
-        assert(end == (char*)cdict->workspace + cdict->workspaceSize);
-        (void)end;
-    }
+    FORWARD_IF_ERROR(ZSTD_reset_matchState(
+        &cdict->matchState,
+        &cdict->workspace,
+        &cParams,
+        ZSTDcrp_makeClean,
+        ZSTDirp_reset,
+        ZSTD_resetTarget_CDict));
     /* (Maybe) load the dictionary
-     * Skips loading the dictionary if it is <= 8 bytes.
+     * Skips loading the dictionary if it is < 8 bytes.
      */
     {   ZSTD_CCtx_params params;
         memset(&params, 0, sizeof(params));
@@ -3060,9 +3230,9 @@
         params.fParams.contentSizeFlag = 1;
         params.cParams = cParams;
         {   size_t const dictID = ZSTD_compress_insertDictionary(
-                    &cdict->cBlockState, &cdict->matchState, &params,
-                    cdict->dictContent, cdict->dictContentSize,
-                    dictContentType, ZSTD_dtlm_full, cdict->workspace);
+                    &cdict->cBlockState, &cdict->matchState, &cdict->workspace,
+                    &params, cdict->dictContent, cdict->dictContentSize,
+                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
             FORWARD_IF_ERROR(dictID);
             assert(dictID <= (size_t)(U32)-1);
             cdict->dictID = (U32)dictID;
@@ -3080,18 +3250,29 @@
     DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
 
-    {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
-        size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
+    {   size_t const workspaceSize =
+            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
+            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
+            ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
+            (dictLoadMethod == ZSTD_dlm_byRef ? 0
+             : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
         void* const workspace = ZSTD_malloc(workspaceSize, customMem);
-
-        if (!cdict || !workspace) {
-            ZSTD_free(cdict, customMem);
+        ZSTD_cwksp ws;
+        ZSTD_CDict* cdict;
+
+        if (!workspace) {
             ZSTD_free(workspace, customMem);
             return NULL;
         }
+
+        ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+
+        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+        assert(cdict != NULL);
+        ZSTD_cwksp_move(&cdict->workspace, &ws);
         cdict->customMem = customMem;
-        cdict->workspace = workspace;
-        cdict->workspaceSize = workspaceSize;
+        cdict->compressionLevel = 0; /* signals advanced API usage */
+
         if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                         dictBuffer, dictSize,
                                         dictLoadMethod, dictContentType,
@@ -3107,9 +3288,12 @@
 ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
 {
     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
-    return ZSTD_createCDict_advanced(dict, dictSize,
-                                     ZSTD_dlm_byCopy, ZSTD_dct_auto,
-                                     cParams, ZSTD_defaultCMem);
+    ZSTD_CDict* cdict = ZSTD_createCDict_advanced(dict, dictSize,
+                                                  ZSTD_dlm_byCopy, ZSTD_dct_auto,
+                                                  cParams, ZSTD_defaultCMem);
+    if (cdict)
+        cdict->compressionLevel = compressionLevel == 0 ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
+    return cdict;
 }
 
 ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
@@ -3124,9 +3308,11 @@
 {
     if (cdict==NULL) return 0;   /* support free on NULL */
     {   ZSTD_customMem const cMem = cdict->customMem;
-        ZSTD_free(cdict->workspace, cMem);
-        ZSTD_free(cdict->dictBuffer, cMem);
-        ZSTD_free(cdict, cMem);
+        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
+        ZSTD_cwksp_free(&cdict->workspace, cMem);
+        if (!cdictInWorkspace) {
+            ZSTD_free(cdict, cMem);
+        }
         return 0;
     }
 }
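The ownership check above exists because the ZSTD_CDict struct may itself have been carved out of the workspace it manages; in that case freeing the workspace already releases the struct, and a separate ZSTD_free would be a double free. A minimal sketch of the pattern (all names hypothetical):

#include <stdlib.h>

typedef struct { void* base; size_t size; } arena_t;
typedef struct { arena_t ws; /* ... */ } thing_t;

static thing_t* thing_create(size_t bytes) {
    void* const mem = malloc(bytes);
    thing_t* const t = (thing_t*)mem;   /* struct lives inside its own arena */
    if (t) { t->ws.base = mem; t->ws.size = bytes; }
    return t;
}
static void thing_free(thing_t* t) {
    if (t) free(t->ws.base);            /* releases the struct as well */
}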
@@ -3152,28 +3338,30 @@
                                  ZSTD_compressionParameters cParams)
 {
     size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
-    size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
-                            + HUF_WORKSPACE_SIZE + matchStateSize;
-    ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
-    void* ptr;
+    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
+                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+                            + matchStateSize;
+    ZSTD_CDict* cdict;
+
     if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
+
+    {
+        ZSTD_cwksp ws;
+        ZSTD_cwksp_init(&ws, workspace, workspaceSize);
+        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+        if (cdict == NULL) return NULL;
+        ZSTD_cwksp_move(&cdict->workspace, &ws);
+    }
+
     DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
         (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
     if (workspaceSize < neededSize) return NULL;
 
-    if (dictLoadMethod == ZSTD_dlm_byCopy) {
-        memcpy(cdict+1, dict, dictSize);
-        dict = cdict+1;
-        ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
-    } else {
-        ptr = cdict+1;
-    }
-    cdict->workspace = ptr;
-    cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;
-
     if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                               dict, dictSize,
-                                              ZSTD_dlm_byRef, dictContentType,
+                                              dictLoadMethod, dictContentType,
                                               cParams) ))
         return NULL;
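A hedged sketch of calling the static-CDict path above, using zstd's static-linking-only API: the caller supplies an 8-byte-aligned workspace sized via ZSTD_estimateCDictSize_advanced(), and the returned cdict lives inside that buffer:

size_t const wkspSize = ZSTD_estimateCDictSize_advanced(
        dictSize, cParams, ZSTD_dlm_byCopy);
void* const wksp = malloc(wkspSize);    /* malloc is suitably aligned */
const ZSTD_CDict* const cdict = ZSTD_initStaticCDict(
        wksp, wkspSize, dict, dictSize,
        ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams);
/* cdict == NULL on misalignment or an undersized workspace;
 * never free(wksp) while cdict is still in use */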
 
@@ -3195,7 +3383,15 @@
     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
     RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);
     {   ZSTD_CCtx_params params = cctx->requestedParams;
-        params.cParams = ZSTD_getCParamsFromCDict(cdict);
+        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+                        || cdict->compressionLevel == 0 )
+                      && (params.attachDictPref != ZSTD_dictForceLoad) ?
+                ZSTD_getCParamsFromCDict(cdict)
+              : ZSTD_getCParams(cdict->compressionLevel,
+                                pledgedSrcSize,
+                                cdict->dictContentSize);
         /* Increase window log to fit the entire dictionary and source if the
          * source size is known. Limit the increase to 19, which is the
          * window log for compression level 1 with the largest source size.
@@ -3209,7 +3405,7 @@
         return ZSTD_compressBegin_internal(cctx,
                                            NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                            cdict,
-                                           params, pledgedSrcSize,
+                                           &params, pledgedSrcSize,
                                            ZSTDb_not_buffered);
     }
 }
@@ -3300,7 +3496,7 @@
     FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                                          dict, dictSize, dictContentType, ZSTD_dtlm_fast,
                                          cdict,
-                                         params, pledgedSrcSize,
+                                         &params, pledgedSrcSize,
                                          ZSTDb_buffered) );
 
     cctx->inToCompress = 0;
@@ -3334,13 +3530,14 @@
  *  Assumption 2 : either dict, or cdict, is defined, not both */
 size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                     const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
-                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
+                    const ZSTD_CCtx_params* params,
+                    unsigned long long pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_initCStream_internal");
     FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
     FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
-    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
-    zcs->requestedParams = params;
+    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+    zcs->requestedParams = *params;
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
     if (dict) {
         FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
@@ -3379,7 +3576,7 @@
 /* ZSTD_initCStream_advanced() :
  * pledgedSrcSize must be exact.
  * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
- * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
+ * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
                                  const void* dict, size_t dictSize,
                                  ZSTD_parameters params, unsigned long long pss)
@@ -3393,7 +3590,7 @@
     FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
     FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
     FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
-    zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+    zcs->requestedParams = ZSTD_assignParamsToCCtxParams(&zcs->requestedParams, params);
     FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
     return 0;
 }
@@ -3643,7 +3840,7 @@
             if (cctx->mtctx == NULL) {
                 DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
                             params.nbWorkers);
-                cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
+                cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem);
                 RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);
             }
             /* mt compression */
@@ -3771,8 +3968,8 @@
     { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
     { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
     { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
-    { 21, 16, 17,  1,  5,  1, ZSTD_dfast   },  /* level  3 */
-    { 21, 18, 18,  1,  5,  1, ZSTD_dfast   },  /* level  4 */
+    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
+    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
     { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
     { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
     { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
@@ -3796,8 +3993,8 @@
     /* W,  C,  H,  S,  L,  T, strat */
     { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
     { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
-    { 18, 14, 14,  1,  5,  1, ZSTD_dfast   },  /* level  2 */
-    { 18, 16, 16,  1,  4,  1, ZSTD_dfast   },  /* level  3 */
+    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
+    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
     { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
     { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
     { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
@@ -3823,8 +4020,8 @@
     { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
     { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
     { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
-    { 17, 15, 16,  2,  5,  1, ZSTD_dfast   },  /* level  3 */
-    { 17, 17, 17,  2,  4,  1, ZSTD_dfast   },  /* level  4 */
+    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
+    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
     { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
     { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
     { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
@@ -3849,7 +4046,7 @@
     { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
     { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
     { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
-    { 14, 14, 15,  2,  4,  1, ZSTD_dfast   },  /* level  3 */
+    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
     { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
     { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
     { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
--- a/contrib/python-zstandard/zstd/compress/zstd_compress_internal.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_internal.h	Tue Jan 21 13:14:51 2020 -0500
@@ -19,6 +19,7 @@
 *  Dependencies
 ***************************************/
 #include "zstd_internal.h"
+#include "zstd_cwksp.h"
 #ifdef ZSTD_MULTITHREAD
 #  include "zstdmt_compress.h"
 #endif
@@ -192,6 +193,13 @@
   size_t capacity; /* The capacity starting from `seq` pointer */
 } rawSeqStore_t;
 
+typedef struct {
+    int collectSequences;
+    ZSTD_Sequence* seqStart;
+    size_t seqIndex;
+    size_t maxSequences;
+} SeqCollector;
+
 struct ZSTD_CCtx_params_s {
     ZSTD_format_e format;
     ZSTD_compressionParameters cParams;
@@ -203,6 +211,9 @@
     size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.
                                 * No target when targetCBlockSize == 0.
                                 * There is no guarantee on compressed block size */
+    int srcSizeHint;           /* User's best guess of source size.
+                                * Hint is not valid when srcSizeHint == 0.
+                                * There is no guarantee that hint is close to actual source size */
 
     ZSTD_dictAttachPref_e attachDictPref;
     ZSTD_literalCompressionMode_e literalCompressionMode;
@@ -228,9 +239,7 @@
     ZSTD_CCtx_params appliedParams;
     U32   dictID;
 
-    int workSpaceOversizedDuration;
-    void* workSpace;
-    size_t workSpaceSize;
+    ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
     size_t blockSize;
     unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
     unsigned long long consumedSrcSize;
@@ -238,6 +247,8 @@
     XXH64_state_t xxhState;
     ZSTD_customMem customMem;
     size_t staticSize;
+    SeqCollector seqCollector;
+    int isFirstBlock;
 
     seqStore_t seqStore;      /* sequences storage ptrs */
     ldmState_t ldmState;      /* long distance matching state */
@@ -337,26 +348,57 @@
     return (srcSize >> minlog) + 2;
 }
 
+/*! ZSTD_safecopyLiterals() :
+ *  memcpy() variant that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
+ *  Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
+ *  large copies.
+ */
+static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
+    assert(iend > ilimit_w);
+    if (ip <= ilimit_w) {
+        ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
+        op += ilimit_w - ip;
+        ip = ilimit_w;
+    }
+    while (ip < iend) *op++ = *ip++;
+}
+
 /*! ZSTD_storeSeq() :
- *  Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
- *  `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
+ *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
+ *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
  *  `mlBase` : matchLength - MINMATCH
+ *  Allowed to overread literals up to litLimit.
 */
-MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
+HINT_INLINE UNUSED_ATTR
+void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
 {
+    BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
+    BYTE const* const litEnd = literals + litLength;
 #if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
     static const BYTE* g_start = NULL;
     if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
     {   U32 const pos = (U32)((const BYTE*)literals - g_start);
         DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
-               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
+               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
     }
 #endif
     assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
     /* copy Literals */
     assert(seqStorePtr->maxNbLit <= 128 KB);
     assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
-    ZSTD_wildcopy(seqStorePtr->lit, literals, (ptrdiff_t)litLength, ZSTD_no_overlap);
+    assert(literals + litLength <= litLimit);
+    if (litEnd <= litLimit_w) {
+        /* Common case where we can use wildcopy.
+         * First copy 16 bytes, because literals are likely short.
+         */
+        assert(WILDCOPY_OVERLENGTH >= 16);
+        ZSTD_copy16(seqStorePtr->lit, literals);
+        if (litLength > 16) {
+            ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
+        }
+    } else {
+        ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
+    }
     seqStorePtr->lit += litLength;
 
     /* literal Length */
@@ -368,7 +410,7 @@
     seqStorePtr->sequences[0].litLength = (U16)litLength;
 
     /* match offset */
-    seqStorePtr->sequences[0].offset = offsetCode + 1;
+    seqStorePtr->sequences[0].offset = offCode + 1;
 
     /* match Length */
     if (mlBase>0xFFFF) {
@@ -910,7 +952,7 @@
 size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                      const void* dict, size_t dictSize,
                      const ZSTD_CDict* cdict,
-                     ZSTD_CCtx_params  params, unsigned long long pledgedSrcSize);
+                     const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
 
 void ZSTD_resetSeqStore(seqStore_t* ssPtr);
 
@@ -925,7 +967,7 @@
                                     ZSTD_dictContentType_e dictContentType,
                                     ZSTD_dictTableLoadMethod_e dtlm,
                                     const ZSTD_CDict* cdict,
-                                    ZSTD_CCtx_params params,
+                                    const ZSTD_CCtx_params* params,
                                     unsigned long long pledgedSrcSize);
 
 /* ZSTD_compress_advanced_internal() :
@@ -934,7 +976,7 @@
                                        void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize,
                                  const void* dict,size_t dictSize,
-                                 ZSTD_CCtx_params params);
+                                 const ZSTD_CCtx_params* params);
 
 
 /* ZSTD_writeLastEmptyBlock() :
--- a/contrib/python-zstandard/zstd/compress/zstd_compress_literals.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_literals.c	Tue Jan 21 13:14:51 2020 -0500
@@ -70,7 +70,7 @@
                               ZSTD_strategy strategy, int disableLiteralCompression,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
-                              void* workspace, size_t wkspSize,
+                              void* entropyWorkspace, size_t entropyWorkspaceSize,
                         const int bmi2)
 {
     size_t const minGain = ZSTD_minGain(srcSize, strategy);
@@ -99,10 +99,15 @@
     {   HUF_repeat repeat = prevHuf->repeatMode;
         int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
         if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
-        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
-                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
+        cLitSize = singleStream ?
+            HUF_compress1X_repeat(
+                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+                255, 11, entropyWorkspace, entropyWorkspaceSize,
+                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
+            HUF_compress4X_repeat(
+                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+                255, 11, entropyWorkspace, entropyWorkspaceSize,
+                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
         if (repeat != HUF_repeat_none) {
             /* reused the existing table */
             hType = set_repeat;
--- a/contrib/python-zstandard/zstd/compress/zstd_compress_literals.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_literals.h	Tue Jan 21 13:14:51 2020 -0500
@@ -23,7 +23,7 @@
                               ZSTD_strategy strategy, int disableLiteralCompression,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
-                              void* workspace, size_t wkspSize,
+                              void* entropyWorkspace, size_t entropyWorkspaceSize,
                         const int bmi2);
 
 #endif /* ZSTD_COMPRESS_LITERALS_H */
--- a/contrib/python-zstandard/zstd/compress/zstd_compress_sequences.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_sequences.c	Tue Jan 21 13:14:51 2020 -0500
@@ -222,7 +222,7 @@
                 const BYTE* codeTable, size_t nbSeq,
                 const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                 const FSE_CTable* prevCTable, size_t prevCTableSize,
-                void* workspace, size_t workspaceSize)
+                void* entropyWorkspace, size_t entropyWorkspaceSize)
 {
     BYTE* op = (BYTE*)dst;
     const BYTE* const oend = op + dstCapacity;
@@ -238,7 +238,7 @@
         memcpy(nextCTable, prevCTable, prevCTableSize);
         return 0;
     case set_basic:
-        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
+        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize));  /* note : could be pre-calculated */
         return 0;
     case set_compressed: {
         S16 norm[MaxSeq + 1];
@@ -252,7 +252,7 @@
         FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
         {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
             FORWARD_IF_ERROR(NCountSize);
-            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
+            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize));
             return NCountSize;
         }
     }
--- a/contrib/python-zstandard/zstd/compress/zstd_compress_sequences.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_sequences.h	Tue Jan 21 13:14:51 2020 -0500
@@ -35,7 +35,7 @@
                 const BYTE* codeTable, size_t nbSeq,
                 const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                 const FSE_CTable* prevCTable, size_t prevCTableSize,
-                void* workspace, size_t workspaceSize);
+                void* entropyWorkspace, size_t entropyWorkspaceSize);
 
 size_t ZSTD_encodeSequences(
             void* dst, size_t dstCapacity,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstd_cwksp.h	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CWKSP_H
+#define ZSTD_CWKSP_H
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include "zstd_internal.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-*************************************
+*  Constants
+***************************************/
+
+/* define "workspace is too large" as this number of times larger than needed */
+#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
+
+/* When the workspace has been too large for at least this many
+ * consecutive compressions, the context's memory usage is considered
+ * wasteful, because it is sized for a worst-case scenario that rarely
+ * happens. In that case, resize it down to free some memory. */
+#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
+
+/* Since the workspace is effectively its own little malloc implementation /
+ * arena, when we run under ASAN, we should similarly insert redzones between
+ * each internal element of the workspace, so ASAN will catch overruns that
+ * reach outside an object but that stay inside the workspace.
+ *
+ * This defines the size of that redzone.
+ */
+#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
+#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
+#endif
+
+/*-*************************************
+*  Structures
+***************************************/
+typedef enum {
+    ZSTD_cwksp_alloc_objects,
+    ZSTD_cwksp_alloc_buffers,
+    ZSTD_cwksp_alloc_aligned
+} ZSTD_cwksp_alloc_phase_e;
+
+/**
+ * Zstd fits all its internal datastructures into a single contiguous buffer,
+ * so that it only needs to perform a single OS allocation (or so that a buffer
+ * can be provided to it and it can perform no allocations at all). This buffer
+ * is called the workspace.
+ *
+ * Several optimizations complicate that process of allocating memory ranges
+ * from this workspace for each internal datastructure:
+ *
+ * - These different internal datastructures have different setup requirements:
+ *
+ *   - The static objects need to be cleared once and can then be trivially
+ *     reused for each compression.
+ *
+ *   - Various buffers don't need to be initialized at all--they are always
+ *     written into before they're read.
+ *
+ *   - The matchstate tables have a unique requirement that they don't need
+ *     their memory to be totally cleared, but they do need the memory to have
+ *     some bound, i.e., a guarantee that all values in the memory they've been
+ *     allocated are less than some maximum value (which is the starting value
+ *     for the indices that they will then use for compression). When this
+ *     guarantee is provided to them, they can use the memory without any setup
+ *     work. When it isn't, they have to clear the area.
+ *
+ * - These buffers also have different alignment requirements.
+ *
+ * - We would like to reuse the objects in the workspace for multiple
+ *   compressions without having to perform any expensive reallocation or
+ *   reinitialization work.
+ *
+ * - We would like to be able to efficiently reuse the workspace across
+ *   multiple compressions **even when the compression parameters change** and
+ *   we need to resize some of the objects (where possible).
+ *
+ * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
+ * abstraction was created. It works as follows:
+ *
+ * Workspace Layout:
+ *
+ * [                        ... workspace ...                         ]
+ * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ *
+ * The various objects that live in the workspace are divided into the
+ * following categories, and are allocated separately:
+ *
+ * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
+ *   so that literally everything fits in a single buffer. Note: if present,
+ *   this must be the first object in the workspace, since ZSTD_free{CCtx,
+ *   CDict}() rely on a pointer comparison to see whether one or two frees are
+ *   required.
+ *
+ * - Fixed size objects: these are fixed-size, fixed-count objects that are
+ *   nonetheless "dynamically" allocated in the workspace so that we can
+ *   control how they're initialized separately from the broader ZSTD_CCtx.
+ *   Examples:
+ *   - Entropy Workspace
+ *   - 2 x ZSTD_compressedBlockState_t
+ *   - CDict dictionary contents
+ *
+ * - Tables: these are any of several different datastructures (hash tables,
+ *   chain tables, binary trees) that all respect a common format: they are
+ *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
+ *   Their sizes depend on the cparams.
+ *
+ * - Aligned: these buffers are used for various purposes that require 4 byte
+ *   alignment, but don't require any initialization before they're used.
+ *
+ * - Buffers: these buffers are used for various purposes that don't require
+ *   any alignment or initialization before they're used. This means they can
+ *   be moved around at no cost for a new compression.
+ *
+ * Allocating Memory:
+ *
+ * The various types of objects must be allocated in order, so they can be
+ * correctly packed into the workspace buffer. That order is:
+ *
+ * 1. Objects
+ * 2. Buffers
+ * 3. Aligned
+ * 4. Tables
+ *
+ * Attempts to reserve objects of different types out of order will fail.
+ */
+typedef struct {
+    void* workspace;
+    void* workspaceEnd;
+
+    void* objectEnd;
+    void* tableEnd;
+    void* tableValidEnd;
+    void* allocStart;
+
+    int allocFailed;
+    int workspaceOversizedDuration;
+    ZSTD_cwksp_alloc_phase_e phase;
+} ZSTD_cwksp;
+
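+/*
+ * A minimal usage sketch (hypothetical; the sizes and the caller are
+ * illustrative, not upstream code). Reservations must follow the phase
+ * order documented above: objects first, then buffers, then aligned
+ * allocations and tables. The functions used here are declared below.
+ */
+#if 0
+static size_t example_cwksp_usage(void* mem, size_t memSize)
+{
+    ZSTD_cwksp ws;
+    ZSTD_cwksp_init(&ws, mem, memSize);                              /* ws takes ownership of mem */
+    {   void* const obj   = ZSTD_cwksp_reserve_object(&ws, 64);      /* phase 1: objects */
+        BYTE* const buf   = ZSTD_cwksp_reserve_buffer(&ws, 1 << 17); /* phase 2: buffers */
+        void* const algn  = ZSTD_cwksp_reserve_aligned(&ws, 1 << 10); /* phase 3: aligned */
+        void* const table = ZSTD_cwksp_reserve_table(&ws, 1 << 12);   /* phase 4: tables */
+        (void)obj; (void)buf; (void)algn; (void)table;
+    }
+    return ZSTD_cwksp_reserve_failed(&ws) ? 1 : 0;                   /* 0 on success */
+}
+#endif
+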
+/*-*************************************
+*  Functions
+***************************************/
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+
+MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
+    (void)ws;
+    assert(ws->workspace <= ws->objectEnd);
+    assert(ws->objectEnd <= ws->tableEnd);
+    assert(ws->objectEnd <= ws->tableValidEnd);
+    assert(ws->tableEnd <= ws->allocStart);
+    assert(ws->tableValidEnd <= ws->allocStart);
+    assert(ws->allocStart <= ws->workspaceEnd);
+}
+
+/**
+ * Align must be a power of 2.
+ */
+MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+    size_t const mask = align - 1;
+    assert((align & mask) == 0);
+    return (size + mask) & ~mask;
+}
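+/* e.g. ZSTD_cwksp_align(10, 8) == 16 and ZSTD_cwksp_align(16, 8) == 16
+ * (illustrative values). */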
+
+/**
+ * Use this to determine how much space in the workspace we will consume to
+ * allocate this object. (Normally it should be exactly the size of the object,
+ * but under special conditions, like ASAN, where we pad each object, it might
+ * be larger.)
+ *
+ * Since tables aren't currently redzoned, you don't need to call through this
+ * to figure out how much space you need for the matchState tables. Everything
+ * else should go through this, though.
+ */
+MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+#else
+    return size;
+#endif
+}
+
+MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
+        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
+    assert(phase >= ws->phase);
+    if (phase > ws->phase) {
+        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
+                phase >= ZSTD_cwksp_alloc_buffers) {
+            ws->tableValidEnd = ws->objectEnd;
+        }
+        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
+                phase >= ZSTD_cwksp_alloc_aligned) {
+            /* If unaligned allocations down from a too-large top have left us
+             * unaligned, we need to realign our alloc ptr. Technically, this
+             * can consume space that is unaccounted for in the neededSpace
+             * calculation. However, I believe this can only happen when the
+             * workspace is too large, and specifically when it is too large
+             * by a larger margin than the space that will be consumed. */
+            /* TODO: cleaner, compiler warning friendly way to do this??? */
+            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
+            if (ws->allocStart < ws->tableValidEnd) {
+                ws->tableValidEnd = ws->allocStart;
+            }
+        }
+        ws->phase = phase;
+    }
+}
+
+/**
+ * Returns whether this object/buffer/etc was allocated in this workspace.
+ */
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
+    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+}
+
+/**
+ * Internal function. Do not use directly.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_internal(
+        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
+    void* alloc;
+    void* bottom = ws->tableEnd;
+    ZSTD_cwksp_internal_advance_phase(ws, phase);
+    alloc = (BYTE *)ws->allocStart - bytes;
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* over-reserve space */
+    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+#endif
+
+    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+    ZSTD_cwksp_assert_internal_consistency(ws);
+    assert(alloc >= bottom);
+    if (alloc < bottom) {
+        DEBUGLOG(4, "cwksp: alloc failed!");
+        ws->allocFailed = 1;
+        return NULL;
+    }
+    if (alloc < ws->tableValidEnd) {
+        ws->tableValidEnd = alloc;
+    }
+    ws->allocStart = alloc;
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
+     * either side. */
+    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    __asan_unpoison_memory_region(alloc, bytes);
+#endif
+
+    return alloc;
+}
+
+/**
+ * Reserves and returns unaligned memory.
+ */
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
+}
+
+/**
+ * Reserves and returns memory sized and aligned on sizeof(unsigned).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
+    assert((bytes & (sizeof(U32)-1)) == 0);
+    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+}
+
+/**
+ * Aligned on sizeof(unsigned). These buffers have the special property that
+ * their values remain constrained, allowing us to re-use them without
+ * memset()-ing them.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
+    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
+    void* alloc = ws->tableEnd;
+    void* end = (BYTE *)alloc + bytes;
+    void* top = ws->allocStart;
+
+    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
+        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+    assert((bytes & (sizeof(U32)-1)) == 0);
+    ZSTD_cwksp_internal_advance_phase(ws, phase);
+    ZSTD_cwksp_assert_internal_consistency(ws);
+    assert(end <= top);
+    if (end > top) {
+        DEBUGLOG(4, "cwksp: table alloc failed!");
+        ws->allocFailed = 1;
+        return NULL;
+    }
+    ws->tableEnd = end;
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    __asan_unpoison_memory_region(alloc, bytes);
+#endif
+
+    return alloc;
+}
+
+/**
+ * Aligned on sizeof(void*).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
+    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+    void* alloc = ws->objectEnd;
+    void* end = (BYTE*)alloc + roundedBytes;
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* over-reserve space */
+    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+#endif
+
+    DEBUGLOG(5,
+        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
+        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
+    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
+    assert((bytes & (sizeof(void*)-1)) == 0);
+    ZSTD_cwksp_assert_internal_consistency(ws);
+    /* we must be in the first phase, no advance is possible */
+    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
+        DEBUGLOG(4, "cwksp: object alloc failed!");
+        ws->allocFailed = 1;
+        return NULL;
+    }
+    ws->objectEnd = end;
+    ws->tableEnd = end;
+    ws->tableValidEnd = end;
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
+     * either side. */
+    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    __asan_unpoison_memory_region(alloc, bytes);
+#endif
+
+    return alloc;
+}
+
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
+
+#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+    /* To validate that the table re-use logic is sound, and that we don't
+     * access table space that we haven't cleaned, we re-"poison" the table
+     * space every time we mark it dirty. */
+    {
+        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
+        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
+        __msan_poison(ws->objectEnd, size);
+    }
+#endif
+
+    assert(ws->tableValidEnd >= ws->objectEnd);
+    assert(ws->tableValidEnd <= ws->allocStart);
+    ws->tableValidEnd = ws->objectEnd;
+    ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
+    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
+    assert(ws->tableValidEnd >= ws->objectEnd);
+    assert(ws->tableValidEnd <= ws->allocStart);
+    if (ws->tableValidEnd < ws->tableEnd) {
+        ws->tableValidEnd = ws->tableEnd;
+    }
+    ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/**
+ * Zero the part of the allocated tables not already marked clean.
+ */
+MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
+    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
+    assert(ws->tableValidEnd >= ws->objectEnd);
+    assert(ws->tableValidEnd <= ws->allocStart);
+    if (ws->tableValidEnd < ws->tableEnd) {
+        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+    }
+    ZSTD_cwksp_mark_tables_clean(ws);
+}
+
+/**
+ * Invalidates table allocations.
+ * All other allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+    DEBUGLOG(4, "cwksp: clearing tables!");
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    {
+        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
+        __asan_poison_memory_region(ws->objectEnd, size);
+    }
+#endif
+
+    ws->tableEnd = ws->objectEnd;
+    ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/**
+ * Invalidates all buffer, aligned, and table allocations.
+ * Object allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
+    DEBUGLOG(4, "cwksp: clearing!");
+
+#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+    /* To validate that the context re-use logic is sound, and that we don't
+     * access stuff that this compression hasn't initialized, we re-"poison"
+     * the workspace (or at least the non-static, non-table parts of it)
+     * every time we start a new compression. */
+    {
+        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
+        __msan_poison(ws->tableValidEnd, size);
+    }
+#endif
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    {
+        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
+        __asan_poison_memory_region(ws->objectEnd, size);
+    }
+#endif
+
+    ws->tableEnd = ws->objectEnd;
+    ws->allocStart = ws->workspaceEnd;
+    ws->allocFailed = 0;
+    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
+        ws->phase = ZSTD_cwksp_alloc_buffers;
+    }
+    ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/**
+ * The provided workspace takes ownership of the buffer [start, start+size).
+ * Any existing values in the workspace are ignored (the previously managed
+ * buffer, if present, must be separately freed).
+ */
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
+    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
+    ws->workspace = start;
+    ws->workspaceEnd = (BYTE*)start + size;
+    ws->objectEnd = ws->workspace;
+    ws->tableValidEnd = ws->objectEnd;
+    ws->phase = ZSTD_cwksp_alloc_objects;
+    ZSTD_cwksp_clear(ws);
+    ws->workspaceOversizedDuration = 0;
+    ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
+    void* workspace = ZSTD_malloc(size, customMem);
+    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
+    RETURN_ERROR_IF(workspace == NULL, memory_allocation);
+    ZSTD_cwksp_init(ws, workspace, size);
+    return 0;
+}
+
+MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
+    void *ptr = ws->workspace;
+    DEBUGLOG(4, "cwksp: freeing workspace");
+    memset(ws, 0, sizeof(ZSTD_cwksp));
+    ZSTD_free(ptr, customMem);
+}
+
+/**
+ * Moves the management of a workspace from one cwksp to another. The src cwksp
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
+ */
+MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
+    *dst = *src;
+    memset(src, 0, sizeof(ZSTD_cwksp));
+}
+
+MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+}
+
+MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
+    return ws->allocFailed;
+}
+
+/*-*************************************
+*  Functions Checking Free Space
+***************************************/
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
+}
+
+MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+    return ZSTD_cwksp_check_available(
+        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
+        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+}
+
+MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
+        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
+        ws->workspaceOversizedDuration++;
+    } else {
+        ws->workspaceOversizedDuration = 0;
+    }
+}
+
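+/*
+ * Hypothetical caller-side sketch (illustrative only, not upstream code):
+ * after sizing a compression, bump the oversized-duration counter and
+ * re-create the workspace at the needed size once it has been wasteful
+ * for long enough.
+ */
+#if 0
+static void example_maybe_shrink(ZSTD_cwksp* ws, size_t neededSpace, ZSTD_customMem customMem)
+{
+    ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);
+    if (ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
+        ZSTD_cwksp_free(ws, customMem);                       /* drop the oversized buffer */
+        (void)ZSTD_cwksp_create(ws, neededSpace, customMem);  /* re-create at the needed size */
+    }
+}
+#endif
+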
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_CWKSP_H */
--- a/contrib/python-zstandard/zstd/compress/zstd_double_fast.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_double_fast.c	Tue Jan 21 13:14:51 2020 -0500
@@ -148,7 +148,7 @@
             const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
             goto _match_stored;
         }
 
@@ -157,7 +157,7 @@
           && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
             mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
             goto _match_stored;
         }
 
@@ -247,7 +247,7 @@
         offset_2 = offset_1;
         offset_1 = offset;
 
-        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
 
 _match_stored:
         /* match found */
@@ -278,7 +278,7 @@
                         const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
                         size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
                         U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                        ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                        ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
                         hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                         hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                         ip += repLength2;
@@ -297,7 +297,7 @@
                     U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
                     hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
                     hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
                     ip += rLength;
                     anchor = ip;
                     continue;   /* faster when present ... (?) */
@@ -411,7 +411,7 @@
             const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
         } else {
             if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                 const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
@@ -422,7 +422,7 @@
                 while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
 
             } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                 size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
@@ -447,7 +447,7 @@
                 }
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
 
             } else {
                 ip += ((ip-anchor) >> kSearchStrength) + 1;
@@ -479,7 +479,7 @@
                     const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                     size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                     U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
                     hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                     hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                     ip += repLength2;
--- a/contrib/python-zstandard/zstd/compress/zstd_fast.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_fast.c	Tue Jan 21 13:14:51 2020 -0500
@@ -8,7 +8,7 @@
  * You may select, at your option, one of the above-listed licenses.
  */
 
-#include "zstd_compress_internal.h"
+#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
 #include "zstd_fast.h"
 
 
@@ -43,8 +43,8 @@
 }
 
 
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_fast_generic(
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_fast_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
         U32 const mls)
@@ -74,8 +74,7 @@
     DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
     ip0 += (ip0 == prefixStart);
     ip1 = ip0 + 1;
-    {
-        U32 const maxRep = (U32)(ip0 - prefixStart);
+    {   U32 const maxRep = (U32)(ip0 - prefixStart);
         if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
         if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
     }
@@ -118,8 +117,7 @@
             match0 = match1;
             goto _offset;
         }
-        {
-            size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
+        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
             assert(step >= 2);
             ip0 += step;
             ip1 += step;
@@ -138,7 +136,7 @@
 _match: /* Requires: ip0, match0, offcode */
         /* Count the forward length */
         mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
-        ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
+        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
         /* match found */
         ip0 += mLength;
         anchor = ip0;
@@ -150,16 +148,15 @@
             hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
             hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
 
-            while ( (ip0 <= ilimit)
-                 && ( (offset_2>0)
-                    & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
+            while ( ((ip0 <= ilimit) & (offset_2>0))  /* offset_2==0 means offset_2 is invalidated */
+                 && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
                 /* store sequence */
                 size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
-                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
+                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
                 hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                 ip0 += rLength;
                 ip1 = ip0 + 1;
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
                 anchor = ip0;
                 continue;   /* faster when present (confirmed on gcc-8) ... (?) */
             }
@@ -179,8 +176,7 @@
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    ZSTD_compressionParameters const* cParams = &ms->cParams;
-    U32 const mls = cParams->minMatch;
+    U32 const mls = ms->cParams.minMatch;
     assert(ms->dictMatchState == NULL);
     switch(mls)
     {
@@ -265,7 +261,7 @@
             const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
         } else if ( (matchIndex <= prefixStartIndex) ) {
             size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
             U32 const dictMatchIndex = dictHashTable[dictHash];
@@ -285,7 +281,7 @@
                 } /* catch up */
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
             }
         } else if (MEM_read32(match) != MEM_read32(ip)) {
             /* it's not a match, and we're not going to check the dictionary */
@@ -300,7 +296,7 @@
                  && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
             offset_2 = offset_1;
             offset_1 = offset;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
         }
 
         /* match found */
@@ -325,7 +321,7 @@
                     const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                     size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                     U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
                     hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                     ip += repLength2;
                     anchor = ip;
@@ -348,8 +344,7 @@
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    ZSTD_compressionParameters const* cParams = &ms->cParams;
-    U32 const mls = cParams->minMatch;
+    U32 const mls = ms->cParams.minMatch;
     assert(ms->dictMatchState != NULL);
     switch(mls)
     {
@@ -408,16 +403,17 @@
         const U32    repIndex = current + 1 - offset_1;
         const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
         const BYTE* const repMatch = repBase + repIndex;
-        size_t mLength;
         hashTable[h] = current;   /* update hash table */
         assert(offset_1 <= current +1);   /* check repIndex */
 
         if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
             const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
+            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
+            ip += rLength;
+            anchor = ip;
         } else {
             if ( (matchIndex < dictStartIndex) ||
                  (MEM_read32(match) != MEM_read32(ip)) ) {
@@ -427,19 +423,15 @@
             }
             {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                 const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
-                U32 offset;
-                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+                U32 const offset = current - matchIndex;
+                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                 while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
-                offset = current - matchIndex;
-                offset_2 = offset_1;
-                offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                offset_2 = offset_1; offset_1 = offset;  /* update offset history */
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ip += mLength;
+                anchor = ip;
         }   }
 
-        /* found a match : store it */
-        ip += mLength;
-        anchor = ip;
-
         if (ip <= ilimit) {
             /* Fill Table */
             hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
@@ -448,13 +440,13 @@
             while (ip <= ilimit) {
                 U32 const current2 = (U32)(ip-base);
                 U32 const repIndex2 = current2 - offset_2;
-                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                 if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
                    && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                     const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                     size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
-                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
+                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
                     hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                     ip += repLength2;
                     anchor = ip;
@@ -476,8 +468,7 @@
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
-    ZSTD_compressionParameters const* cParams = &ms->cParams;
-    U32 const mls = cParams->minMatch;
+    U32 const mls = ms->cParams.minMatch;
     switch(mls)
     {
     default: /* includes case 3 */
--- a/contrib/python-zstandard/zstd/compress/zstd_lazy.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_lazy.c	Tue Jan 21 13:14:51 2020 -0500
@@ -810,7 +810,7 @@
         /* store sequence */
 _storeSequence:
         {   size_t const litLength = start - anchor;
-            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
             anchor = ip = start + matchLength;
         }
 
@@ -828,7 +828,7 @@
                     const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
                     matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
                     offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
-                    ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
                     ip += matchLength;
                     anchor = ip;
                     continue;
@@ -843,7 +843,7 @@
                 /* store sequence */
                 matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                 offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
                 ip += matchLength;
                 anchor = ip;
                 continue;   /* faster when present ... (?) */
@@ -1051,7 +1051,7 @@
         /* store sequence */
 _storeSequence:
         {   size_t const litLength = start - anchor;
-            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
             anchor = ip = start + matchLength;
         }
 
@@ -1066,7 +1066,7 @@
                 const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                 matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                 offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
                 ip += matchLength;
                 anchor = ip;
                 continue;   /* faster when present ... (?) */
--- a/contrib/python-zstandard/zstd/compress/zstd_ldm.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_ldm.c	Tue Jan 21 13:14:51 2020 -0500
@@ -49,9 +49,9 @@
 {
     size_t const ldmHSize = ((size_t)1) << params.hashLog;
     size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
-    size_t const ldmBucketSize =
-        ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
-    size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t);
+    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
+    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
+                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
     return params.enableLdm ? totalSize : 0;
 }
 
@@ -583,7 +583,7 @@
                 rep[i] = rep[i-1];
             rep[0] = sequence.offset;
             /* Store the sequence */
-            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength,
+            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
                           sequence.offset + ZSTD_REP_MOVE,
                           sequence.matchLength - MINMATCH);
             ip += sequence.matchLength;
--- a/contrib/python-zstandard/zstd/compress/zstd_opt.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstd_opt.c	Tue Jan 21 13:14:51 2020 -0500
@@ -1098,7 +1098,7 @@
 
                     assert(anchor + llen <= iend);
                     ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
-                    ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
+                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
                     anchor += advance;
                     ip = anchor;
             }   }
--- a/contrib/python-zstandard/zstd/compress/zstdmt_compress.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.c	Tue Jan 21 13:14:51 2020 -0500
@@ -668,7 +668,7 @@
 
     /* init */
     if (job->cdict) {
-        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
+        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
         assert(job->firstJob);  /* only allowed for first job */
         if (ZSTD_isError(initError)) JOB_ERROR(initError);
     } else {  /* srcStart points at reloaded section */
@@ -680,7 +680,7 @@
                                         job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
                                         ZSTD_dtlm_fast,
                                         NULL, /*cdict*/
-                                        jobParams, pledgedSrcSize);
+                                        &jobParams, pledgedSrcSize);
             if (ZSTD_isError(initError)) JOB_ERROR(initError);
     }   }
 
@@ -927,12 +927,18 @@
     unsigned jobID;
     DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
     for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
+        /* Copy the mutex/cond out */
+        ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
+        ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;
+
         DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
         ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
-        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
-        mtctx->jobs[jobID].cSize = 0;
+
+        /* Clear the job description, but keep the mutex/cond */
+        memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
+        mtctx->jobs[jobID].job_mutex = mutex;
+        mtctx->jobs[jobID].job_cond = cond;
     }
-    memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
     mtctx->inBuff.buffer = g_nullBuffer;
     mtctx->inBuff.filled = 0;
     mtctx->allJobsCompleted = 1;
@@ -1028,9 +1034,9 @@
 
 /* Sets parameters relevant to the compression job,
  * initializing others to default values. */
-static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
+static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params)
 {
-    ZSTD_CCtx_params jobParams = params;
+    ZSTD_CCtx_params jobParams = *params;
     /* Clear parameters related to multithreading */
     jobParams.forceWindow = 0;
     jobParams.nbWorkers = 0;
@@ -1151,16 +1157,16 @@
 /* =====   Multi-threaded compression   ===== */
 /* ------------------------------------------ */
 
-static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
+static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
 {
     unsigned jobLog;
-    if (params.ldmParams.enableLdm) {
+    if (params->ldmParams.enableLdm) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
          * based on chainLog instead. */
-        jobLog = MAX(21, params.cParams.chainLog + 4);
+        jobLog = MAX(21, params->cParams.chainLog + 4);
     } else {
-        jobLog = MAX(20, params.cParams.windowLog + 2);
+        jobLog = MAX(20, params->cParams.windowLog + 2);
     }
     return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
 }
@@ -1193,27 +1199,27 @@
     return ovlog;
 }
 
-static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params)
+static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
 {
-    int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy);
-    int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog);
+    int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
+    int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
     assert(0 <= overlapRLog && overlapRLog <= 8);
-    if (params.ldmParams.enableLdm) {
+    if (params->ldmParams.enableLdm) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
          * based on chainLog instead.
          * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
-        ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
+        ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
                 - overlapRLog;
     }
     assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
-    DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
+    DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
     DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
     return (ovLog==0) ? 0 : (size_t)1 << ovLog;
 }
 
 static unsigned
-ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
+ZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nbWorkers)
 {
     assert(nbWorkers>0);
     {   size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
@@ -1236,9 +1242,9 @@
           const ZSTD_CDict* cdict,
                 ZSTD_CCtx_params params)
 {
-    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
-    size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
-    unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
+    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(&params);
+    size_t const overlapSize = ZSTDMT_computeOverlapSize(&params);
+    unsigned const nbJobs = ZSTDMT_computeNbJobs(&params, srcSize, params.nbWorkers);
     size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
     size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize;   /* avoid too small last block */
     const char* const srcStart = (const char*)src;
@@ -1256,7 +1262,7 @@
         ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
         DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
         if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
-        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
+        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, &jobParams);
     }
 
     assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
@@ -1404,12 +1410,12 @@
 
     mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
     if (mtctx->singleBlockingThread) {
-        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
+        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(&params);
         DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
         assert(singleThreadParams.nbWorkers == 0);
         return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
                                          dict, dictSize, cdict,
-                                         singleThreadParams, pledgedSrcSize);
+                                         &singleThreadParams, pledgedSrcSize);
     }
 
     DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
@@ -1435,11 +1441,11 @@
         mtctx->cdict = cdict;
     }
 
-    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
+    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
     DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
     mtctx->targetSectionSize = params.jobSize;
     if (mtctx->targetSectionSize == 0) {
-        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
+        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
     }
     assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
 
--- a/contrib/python-zstandard/zstd/decompress/huf_decompress.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/decompress/huf_decompress.c	Tue Jan 21 13:14:51 2020 -0500
@@ -61,7 +61,9 @@
 *  Error Management
 ****************************************************************/
 #define HUF_isError ERR_isError
+#ifndef CHECK_F
 #define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
+#endif
 
 
 /* **************************************************************
--- a/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Tue Jan 21 13:14:51 2020 -0500
@@ -88,10 +88,7 @@
 
 static size_t ZSTD_startingInputLength(ZSTD_format_e format)
 {
-    size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
-                    ZSTD_FRAMEHEADERSIZE_PREFIX - ZSTD_FRAMEIDSIZE :
-                    ZSTD_FRAMEHEADERSIZE_PREFIX;
-    ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
+    size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
     /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
     assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
     return startingInputLength;
@@ -376,7 +373,7 @@
 {
     unsigned long long totalDstSize = 0;
 
-    while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
+    while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
         U32 const magicNumber = MEM_readLE32(src);
 
         if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
@@ -629,11 +626,12 @@
 
     /* check */
     RETURN_ERROR_IF(
-        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
+        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
         srcSize_wrong);
 
     /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
+    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
+                ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
         if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
         RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
                         srcSize_wrong);
@@ -714,7 +712,7 @@
         dictSize = ZSTD_DDict_dictSize(ddict);
     }
 
-    while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
+    while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
 
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
         if (ZSTD_isLegacy(src, srcSize)) {
@@ -1098,7 +1096,7 @@
         size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
         for (i=0; i<3; i++) {
             U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
-            RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
+            RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
                             dictionary_corrupted);
             entropy->rep[i] = rep;
     }   }
@@ -1267,7 +1265,7 @@
 {
     RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
     ZSTD_clearDict(dctx);
-    if (dict && dictSize >= 8) {
+    if (dict && dictSize != 0) {
         dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
         RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
         dctx->ddict = dctx->ddictLocal;
@@ -1300,14 +1298,14 @@
 
 
 /* ZSTD_initDStream_usingDict() :
- * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * return : expected size, aka ZSTD_startingInputLength().
  * this function cannot fail */
 size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
 {
     DEBUGLOG(4, "ZSTD_initDStream_usingDict");
     FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );
     FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
-    return ZSTD_FRAMEHEADERSIZE_PREFIX;
+    return ZSTD_startingInputLength(zds->format);
 }
 
 /* note : this variant can't fail */
@@ -1324,16 +1322,16 @@
 {
     FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );
     FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );
-    return ZSTD_FRAMEHEADERSIZE_PREFIX;
+    return ZSTD_startingInputLength(dctx->format);
 }
 
 /* ZSTD_resetDStream() :
- * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * return : expected size, aka ZSTD_startingInputLength().
  * this function cannot fail */
 size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
 {
     FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));
-    return ZSTD_FRAMEHEADERSIZE_PREFIX;
+    return ZSTD_startingInputLength(dctx->format);
 }
 
 
@@ -1564,7 +1562,7 @@
                             zds->lhSize += remainingInput;
                         }
                         input->pos = input->size;
-                        return (MAX(ZSTD_FRAMEHEADERSIZE_MIN, hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
+                        return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
                     }
                     assert(ip != NULL);
                     memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
--- a/contrib/python-zstandard/zstd/decompress/zstd_decompress_block.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress_block.c	Tue Jan 21 13:14:51 2020 -0500
@@ -573,38 +573,118 @@
     size_t pos;
 } seqState_t;
 
+/*! ZSTD_overlapCopy8() :
+ *  Copies 8 bytes from ip to op, then updates op and ip, where ip <= op.
+ *  If the offset is < 8, the pointers are adjusted so that the distance
+ *  between op and ip grows to at least 8 bytes.
+ *
+ *  Precondition: *ip <= *op
+ *  Postcondition: *op - *ip >= 8
+ */
+static void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
+    assert(*ip <= *op);
+    if (offset < 8) {
+        /* close range match, overlap */
+        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+        int const sub2 = dec64table[offset];
+        (*op)[0] = (*ip)[0];
+        (*op)[1] = (*ip)[1];
+        (*op)[2] = (*ip)[2];
+        (*op)[3] = (*ip)[3];
+        *ip += dec32table[offset];
+        ZSTD_copy4(*op+4, *ip);
+        *ip -= sub2;
+    } else {
+        ZSTD_copy8(*op, *ip);
+    }
+    *ip += 8;
+    *op += 8;
+    assert(*op - *ip >= 8);
+}
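+/* Worked example: with offset == 1 (a run of one repeated byte), the four
+ * byte copies plus the dec32table/dec64table adjustments leave
+ * *op - *ip == 8 afterwards, so later copies can proceed 8 bytes at a
+ * time without reading bytes that have not been written yet. */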
 
-/* ZSTD_execSequenceLast7():
- * exceptional case : decompress a match starting within last 7 bytes of output buffer.
- * requires more careful checks, to ensure there is no overflow.
- * performance does not matter though.
- * note : this case is supposed to be never generated "naturally" by reference encoder,
- *        since in most cases it needs at least 8 bytes to look for a match.
- *        but it's allowed by the specification. */
+/*! ZSTD_safecopy() :
+ *  Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
+ *  and write up to 16 bytes past oend_w (op >= oend_w is allowed).
+ *  This function is only called in the uncommon case where the sequence is near the end of the block. It
+ *  should be fast for a single long sequence, but can be slow for several short sequences.
+ *
+ *  @param ovtype controls the overlap detection
+ *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ *         - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
+ *           The src buffer must be before the dst buffer.
+ */
+static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
+    ptrdiff_t const diff = op - ip;
+    BYTE* const oend = op + length;
+
+    assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
+           (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
+
+    if (length < 8) {
+        /* Handle short lengths. */
+        while (op < oend) *op++ = *ip++;
+        return;
+    }
+    if (ovtype == ZSTD_overlap_src_before_dst) {
+        /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
+        assert(length >= 8);
+        ZSTD_overlapCopy8(&op, &ip, diff);
+        assert(op - ip >= 8);
+        assert(op <= oend);
+    }
+
+    if (oend <= oend_w) {
+        /* No risk of overwrite. */
+        ZSTD_wildcopy(op, ip, length, ovtype);
+        return;
+    }
+    if (op <= oend_w) {
+        /* Wildcopy until we get close to the end. */
+        assert(oend > oend_w);
+        ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
+        ip += oend_w - op;
+        op = oend_w;
+    }
+    /* Handle the leftovers. */
+    while (op < oend) *op++ = *ip++;
+}
+
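[Editor's note] A simplified, non-overlapping analogue (not part of the
changeset) of the ZSTD_safecopy tail handling above: copy in wide strides while
it is provably safe, then finish byte-by-byte once past oend_w. memcpy stands in
for ZSTD_wildcopy; the real function additionally handles overlap via
ZSTD_overlapCopy8.

    #include <stddef.h>
    #include <string.h>

    static void safecopy_demo(char* op, char* const oend_w, const char* ip,
                              ptrdiff_t length) {
        char* const oend = op + length;
        if (op < oend_w) {                 /* wide copies must stop at oend_w */
            ptrdiff_t const n = (oend < oend_w ? oend : oend_w) - op;
            memcpy(op, ip, (size_t)n);
            op += n; ip += n;
        }
        while (op < oend) *op++ = *ip++;   /* leftovers near the buffer end */
    }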
+/* ZSTD_execSequenceEnd():
+ * This version handles cases that are near the end of the output buffer. It requires
+ * more careful checks to make sure there is no overflow. By separating out these hard
+ * and unlikely cases, we can speed up the common cases.
+ *
+ * NOTE: This function needs to be fast for a single long sequence, but doesn't need
+ * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
+ */
 FORCE_NOINLINE
-size_t ZSTD_execSequenceLast7(BYTE* op,
-                              BYTE* const oend, seq_t sequence,
-                              const BYTE** litPtr, const BYTE* const litLimit,
-                              const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+size_t ZSTD_execSequenceEnd(BYTE* op,
+                            BYTE* const oend, seq_t sequence,
+                            const BYTE** litPtr, const BYTE* const litLimit,
+                            const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
 {
     BYTE* const oLitEnd = op + sequence.litLength;
     size_t const sequenceLength = sequence.litLength + sequence.matchLength;
     BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
     const BYTE* const iLitEnd = *litPtr + sequence.litLength;
     const BYTE* match = oLitEnd - sequence.offset;
+    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
 
-    /* check */
-    RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
+    /* bounds checks */
+    assert(oLitEnd < oMatchEnd);
+    RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must fit within dstBuffer");
     RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
 
     /* copy literals */
-    while (op < oLitEnd) *op++ = *(*litPtr)++;
+    ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
+    op = oLitEnd;
+    *litPtr = iLitEnd;
 
     /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - base)) {
+    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
         /* offset beyond prefix */
-        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase),corruption_detected);
-        match = dictEnd - (base-match);
+        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
+        match = dictEnd - (prefixStart-match);
         if (match + sequence.matchLength <= dictEnd) {
             memmove(oLitEnd, match, sequence.matchLength);
             return sequenceLength;
@@ -614,13 +694,12 @@
             memmove(oLitEnd, match, length1);
             op = oLitEnd + length1;
             sequence.matchLength -= length1;
-            match = base;
+            match = prefixStart;
     }   }
-    while (op < oMatchEnd) *op++ = *match++;
+    ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
     return sequenceLength;
 }
 
-
 HINT_INLINE
 size_t ZSTD_execSequence(BYTE* op,
                          BYTE* const oend, seq_t sequence,
@@ -634,20 +713,29 @@
     const BYTE* const iLitEnd = *litPtr + sequence.litLength;
     const BYTE* match = oLitEnd - sequence.offset;
 
-    /* check */
-    RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
-    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
-    if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+    /* Errors and uncommon cases handled here. */
+    assert(oLitEnd < oMatchEnd);
+    if (iLitEnd > litLimit || oMatchEnd > oend_w)
+        return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
+    /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
+    assert(iLitEnd <= litLimit /* Literal length is in bounds */);
+    assert(oLitEnd <= oend_w /* Can wildcopy literals */);
+    assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
 
-    /* copy Literals */
-    if (sequence.litLength > 8)
-        ZSTD_wildcopy_16min(op, (*litPtr), sequence.litLength, ZSTD_no_overlap);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
-    else
-        ZSTD_copy8(op, *litPtr);
+    /* Copy Literals:
+     * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
+     * We likely don't need the full 32-byte wildcopy.
+     */
+    assert(WILDCOPY_OVERLENGTH >= 16);
+    ZSTD_copy16(op, (*litPtr));
+    if (sequence.litLength > 16) {
+        ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
+    }
     op = oLitEnd;
     *litPtr = iLitEnd;   /* update for next sequence */
 
-    /* copy Match */
+    /* Copy Match */
     if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
         /* offset beyond prefix -> go into extDict */
         RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
@@ -662,123 +750,33 @@
             op = oLitEnd + length1;
             sequence.matchLength -= length1;
             match = prefixStart;
-            if (op > oend_w || sequence.matchLength < MINMATCH) {
-              U32 i;
-              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
-              return sequenceLength;
-            }
     }   }
-    /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
-
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-        int const sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTD_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTD_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_w) {
-            ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
-            match += oend_w - op;
-            op = oend_w;
-        }
-        while (op < oMatchEnd) *op++ = *match++;
-    } else {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);   /* works even if matchLength < 8 */
-    }
-    return sequenceLength;
-}
-
-
-HINT_INLINE
-size_t ZSTD_execSequenceLong(BYTE* op,
-                             BYTE* const oend, seq_t sequence,
-                             const BYTE** litPtr, const BYTE* const litLimit,
-                             const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
-{
-    BYTE* const oLitEnd = op + sequence.litLength;
-    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
-    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
-    const BYTE* match = sequence.match;
-
-    /* check */
-    RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
-    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
-    if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
+    /* Match within prefix of 1 or more bytes */
+    assert(op <= oMatchEnd);
+    assert(oMatchEnd <= oend_w);
+    assert(match >= prefixStart);
+    assert(sequence.matchLength >= 1);
 
-    /* copy Literals */
-    if (sequence.litLength > 8)
-        ZSTD_wildcopy_16min(op, *litPtr, sequence.litLength, ZSTD_no_overlap);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
-    else
-        ZSTD_copy8(op, *litPtr);  /* note : op <= oLitEnd <= oend_w == oend - 8 */
-
-    op = oLitEnd;
-    *litPtr = iLitEnd;   /* update for next sequence */
+    /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
+     * without overlap checking.
+     */
+    if (sequence.offset >= WILDCOPY_VECLEN) {
+        /* We bet on a full wildcopy for matches, since we expect matches to be
+         * longer than literals (in general). In silesia, ~10% of matches are longer
+         * than 16 bytes.
+         */
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+        return sequenceLength;
+    }
+    assert(sequence.offset < WILDCOPY_VECLEN);
 
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
-        /* offset beyond prefix */
-        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {   size_t const length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = prefixStart;
-            if (op > oend_w || sequence.matchLength < MINMATCH) {
-              U32 i;
-              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
-              return sequenceLength;
-            }
-    }   }
-    assert(op <= oend_w);
-    assert(sequence.matchLength >= MINMATCH);
+    /* Copy 8 bytes and spread the offset to be >= 8. */
+    ZSTD_overlapCopy8(&op, &match, sequence.offset);
 
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-        int const sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTD_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTD_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_w) {
-            ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
-            match += oend_w - op;
-            op = oend_w;
-        }
-        while (op < oMatchEnd) *op++ = *match++;
-    } else {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);   /* works even if matchLength < 8 */
+    /* If the match length is > 8 bytes, then continue with the wildcopy. */
+    if (sequence.matchLength > 8) {
+        assert(op < oMatchEnd);
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
     }
     return sequenceLength;
 }
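[Editor's note] A sketch (not part of the changeset) of the literals fast path
introduced above: unconditionally copy 16 bytes, and fall back to a wider copy
only for the rare longer runs. memcpy stands in for ZSTD_copy16/ZSTD_wildcopy,
and the buffers are assumed to carry WILDCOPY_OVERLENGTH bytes of slack, as the
real code guarantees via the oend_w checks.

    #include <string.h>

    static void copy_literals_demo(unsigned char* op, const unsigned char* lit,
                                   size_t litLength) {
        memcpy(op, lit, 16);  /* may copy past litLength: the slack makes it safe */
        if (litLength > 16)
            memcpy(op + 16, lit + 16, litLength - 16);
    }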
@@ -1098,7 +1096,7 @@
         /* decode and decompress */
         for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
             seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
-            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
             if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
             PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
             sequences[seqNb & STORED_SEQS_MASK] = sequence;
@@ -1109,7 +1107,7 @@
         /* finish queue */
         seqNb -= seqAdvance;
         for ( ; seqNb<nbSeq ; seqNb++) {
-            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
             if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
             op += oneSeqSize;
         }
--- a/contrib/python-zstandard/zstd/deprecated/zbuff.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/deprecated/zbuff.h	Tue Jan 21 13:14:51 2020 -0500
@@ -36,16 +36,17 @@
 *****************************************************************/
 /* Deprecation warnings */
 /* Should these warnings be a problem,
-   it is generally possible to disable them,
-   typically with -Wno-deprecated-declarations for gcc
-   or _CRT_SECURE_NO_WARNINGS in Visual.
-   Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */
+ * it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc
+ * or _CRT_SECURE_NO_WARNINGS in Visual.
+ * Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS
+ */
 #ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS
 #  define ZBUFF_DEPRECATED(message) ZSTDLIB_API  /* disable deprecation warnings */
 #else
 #  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
 #    define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API
-#  elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__)
+#  elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
 #    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message)))
 #  elif defined(__GNUC__) && (__GNUC__ >= 3)
 #    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))
--- a/contrib/python-zstandard/zstd/dictBuilder/cover.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/dictBuilder/cover.c	Tue Jan 21 13:14:51 2020 -0500
@@ -638,8 +638,8 @@
                     "compared to the source size %u! "
                     "size(source)/size(dictionary) = %f, but it should be >= "
                     "10! This may lead to a subpar dictionary! We recommend "
-                    "training on sources at least 10x, and up to 100x the "
-                    "size of the dictionary!\n", (U32)maxDictSize,
+                    "training on sources at least 10x, and preferably 100x "
+                    "the size of the dictionary! \n", (U32)maxDictSize,
                     (U32)nbDmers, ratio);
 }
 
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Tue Jan 21 13:14:51 2020 -0500
@@ -571,7 +571,7 @@
     unsigned const prime1 = 2654435761U;
     unsigned const prime2 = 2246822519U;
     unsigned acc = prime1;
-    size_t p=0;;
+    size_t p=0;
     for (p=0; p<length; p++) {
         acc *= prime2;
         ((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);
--- a/contrib/python-zstandard/zstd/zstd.h	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python-zstandard/zstd/zstd.h	Tue Jan 21 13:14:51 2020 -0500
@@ -15,6 +15,7 @@
 #define ZSTD_H_235446
 
 /* ======   Dependency   ======*/
+#include <limits.h>   /* INT_MAX */
 #include <stddef.h>   /* size_t */
 
 
@@ -71,7 +72,7 @@
 /*------   Version   ------*/
 #define ZSTD_VERSION_MAJOR    1
 #define ZSTD_VERSION_MINOR    4
-#define ZSTD_VERSION_RELEASE  3
+#define ZSTD_VERSION_RELEASE  4
 
 #define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
 ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< to check runtime library version */
@@ -196,9 +197,13 @@
 ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
 
 /*! ZSTD_compressCCtx() :
- *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx
- *  The function will compress at requested compression level,
- *  ignoring any other parameter */
+ *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
+ *  Important : in order to behave similarly to `ZSTD_compress()`,
+ *  this function compresses at requested compression level,
+ *  __ignoring any other parameter__ .
+ *  If any advanced parameter was set using the advanced API,
+ *  they will all be reset. Only `compressionLevel` remains.
+ */
 ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
@@ -233,7 +238,7 @@
  *   using ZSTD_CCtx_set*() functions.
  *   Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
  *   "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
- *   They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()
+ *   __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
  *
  *   It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
  *
@@ -261,18 +266,26 @@
 
     /* compression parameters
      * Note: When compressing with a ZSTD_CDict these parameters are superseded
-     * by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
-     * for more info (superseded-by-cdict). */
-    ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
+     * by the parameters used to construct the ZSTD_CDict.
+     * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
+    ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
+                              * Note that exact compression parameters are dynamically determined,
+                              * depending on both compression level and srcSize (when known).
                               * Default level is ZSTD_CLEVEL_DEFAULT==3.
                               * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
                               * Note 1 : it's possible to pass a negative compression level.
-                              * Note 2 : setting a level sets all default values of other compression parameters */
+                              * Note 2 : setting a level resets all other compression parameters to default */
+    /* Advanced compression parameters :
+     * It's possible to pin down compression parameters to some specific values.
+     * In which case, these values are no longer dynamically selected by the compressor */
     ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.
+                              * This will set a memory budget for streaming decompression,
+                              * with larger values requiring more memory
+                              * and typically compressing more.
                               * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
                               * Special: value 0 means "use default windowLog".
                               * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
-                              *       requires explicitly allowing such window size at decompression stage if using streaming. */
+                              *       requires explicitly allowing such size at streaming decompression stage. */
     ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.
                               * Resulting memory usage is (1 << (hashLog+2)).
                               * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
@@ -283,13 +296,13 @@
                               * Resulting memory usage is (1 << (chainLog+2)).
                               * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
                               * Larger tables result in better and slower compression.
-                              * This parameter is useless when using "fast" strategy.
+                              * This parameter is useless for "fast" strategy.
                               * It's still useful when using "dfast" strategy,
                               * in which case it defines a secondary probe table.
                               * Special: value 0 means "use default chainLog". */
     ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.
                               * More attempts result in better and slower compression.
-                              * This parameter is useless when using "fast" and "dFast" strategies.
+                              * This parameter is useless for "fast" and "dFast" strategies.
                               * Special: value 0 means "use default searchLog". */
     ZSTD_c_minMatch=105,     /* Minimum size of searched matches.
                               * Note that Zstandard can still find matches of smaller size,
@@ -344,7 +357,7 @@
     ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
                               * Content size must be known at the beginning of compression.
                               * This is automatically the case when using ZSTD_compress2(),
-                              * For streaming variants, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
+                              * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
     ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
     ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */
 
@@ -363,7 +376,7 @@
                               * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
                               * 0 means default, which is dynamically determined based on compression parameters.
                               * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
-                              * The minimum size is automatically and transparently enforced */
+                              * The minimum size is automatically and transparently enforced. */
     ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.
                               * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
                               * It helps preserve compression ratio, while each job is compressed in parallel.
@@ -386,6 +399,7 @@
      * ZSTD_c_forceAttachDict
      * ZSTD_c_literalCompressionMode
      * ZSTD_c_targetCBlockSize
+     * ZSTD_c_srcSizeHint
      * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
      * note : never ever use experimentalParam? names directly;
      *        also, the enums values themselves are unstable and can still change.
@@ -396,6 +410,7 @@
      ZSTD_c_experimentalParam4=1001,
      ZSTD_c_experimentalParam5=1002,
      ZSTD_c_experimentalParam6=1003,
+     ZSTD_c_experimentalParam7=1004
 } ZSTD_cParameter;
 
 typedef struct {
@@ -793,12 +808,17 @@
 typedef struct ZSTD_CDict_s ZSTD_CDict;
 
 /*! ZSTD_createCDict() :
- *  When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
- *  ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
+ *  When compressing multiple messages or blocks using the same dictionary,
+ *  it's recommended to digest the dictionary only once, since it's a costly operation.
+ *  ZSTD_createCDict() will create a state from digesting a dictionary.
+ *  The resulting state can be used for future compression operations with very limited startup cost.
  *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
- *  Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
- *  Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
+ * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ *  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
+ *  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
+ *      in which case the only thing that it transports is the @compressionLevel.
+ *      This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
+ *      expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
 ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
                                          int compressionLevel);
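[Editor's note] A hedged usage sketch (not part of the changeset) of the
digest-once pattern the note above recommends; dictBuf, dictLen and the
compression level are placeholders, and error handling is omitted for brevity.

    #include <zstd.h>

    static size_t compress_with_shared_dict(const void* dictBuf, size_t dictLen,
                                            const void* src, size_t srcLen,
                                            void* dst, size_t dstCap) {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictLen, 3 /* level */);
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        size_t const csize = ZSTD_compress_usingCDict(cctx, dst, dstCap,
                                                      src, srcLen, cdict);
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);  /* in real use, keep the CDict for later calls */
        return csize;
    }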
 
@@ -925,7 +945,7 @@
  *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
  *           It's a CPU consuming operation, with non-negligible impact on latency.
  *           If there is a need to use the same prefix multiple times, consider loadDictionary instead.
- *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent).
+ *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
  *           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
 ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
                                  const void* prefix, size_t prefixSize);
@@ -969,7 +989,7 @@
  *  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
  *           Prefix buffer must remain unmodified up to the end of frame,
  *           reached when ZSTD_decompressStream() returns 0.
- *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
+ *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
  *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
  *  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
  *           A full dictionary is more costly, as it requires building tables.
@@ -1014,8 +1034,8 @@
  * Some of them might be removed in the future (especially when redundant with existing stable functions)
  * ***************************************************************************************/
 
-#define ZSTD_FRAMEHEADERSIZE_PREFIX 5   /* minimum input size required to query frame header size */
-#define ZSTD_FRAMEHEADERSIZE_MIN    6
+#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1)   /* minimum input size required to query frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN(format)    ((format) == ZSTD_f_zstd1 ? 6 : 2)
 #define ZSTD_FRAMEHEADERSIZE_MAX   18   /* can be useful for static allocation */
 #define ZSTD_SKIPPABLEHEADERSIZE    8
 
@@ -1063,6 +1083,8 @@
 /* Advanced parameter bounds */
 #define ZSTD_TARGETCBLOCKSIZE_MIN   64
 #define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
+#define ZSTD_SRCSIZEHINT_MIN        0
+#define ZSTD_SRCSIZEHINT_MAX        INT_MAX
 
 /* internal */
 #define ZSTD_HASHLOG3_MAX           17
@@ -1073,6 +1095,24 @@
 typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
 
 typedef struct {
+    unsigned int matchPos; /* Match pos in dst */
+    /* If seqDef.offset > 3, then this is seqDef.offset - 3
+     * If seqDef.offset < 3, then this is the corresponding repeat offset
+     * But if seqDef.offset < 3 and litLength == 0, this is the
+     *   repeat offset before the corresponding repeat offset
+     * And if seqDef.offset == 3 and litLength == 0, this is the
+     *   most recent repeat offset - 1
+     */
+    unsigned int offset;
+    unsigned int litLength; /* Literal length */
+    unsigned int matchLength; /* Match length */
+    /* 0 when the sequence does not use a repeat offset, seqDef.offset otherwise.
+     * When litLength == 0 this will be <= 4, otherwise <= 3 as usual.
+     */
+    unsigned int rep;
+} ZSTD_Sequence;
+
+typedef struct {
     unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */
     unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
     unsigned hashLog;         /**< dispatch table : larger == faster, more memory */
@@ -1101,21 +1141,12 @@
 
 typedef enum {
     ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */
-    ZSTD_dlm_byRef = 1,   /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
+    ZSTD_dlm_byRef = 1    /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
 } ZSTD_dictLoadMethod_e;
 
 typedef enum {
-    /* Opened question : should we have a format ZSTD_f_auto ?
-     * Today, it would mean exactly the same as ZSTD_f_zstd1.
-     * But, in the future, should several formats become supported,
-     * on the compression side, it would mean "default format".
-     * On the decompression side, it would mean "automatic format detection",
-     * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
-     * Since meaning is a little different, another option could be to define different enums for compression and decompression.
-     * This question could be kept for later, when there are actually multiple formats to support,
-     * but there is also the question of pinning enum values, and pinning value `0` is especially important */
     ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */
-    ZSTD_f_zstd1_magicless = 1, /* Variant of zstd frame format, without initial 4-bytes magic number.
+    ZSTD_f_zstd1_magicless = 1  /* Variant of zstd frame format, without initial 4-bytes magic number.
                                  * Useful to save 4 bytes per generated frame.
                                 * Decoder cannot recognise this format automatically, so it requires this instruction. */
 } ZSTD_format_e;
@@ -1126,7 +1157,7 @@
      * to evolve and should be considered only in the context of extremely
      * advanced performance tuning.
      *
-     * Zstd currently supports the use of a CDict in two ways:
+     * Zstd currently supports the use of a CDict in three ways:
      *
      * - The contents of the CDict can be copied into the working context. This
      *   means that the compression can search both the dictionary and input
@@ -1142,6 +1173,12 @@
      *   working context's tables can be reused). For small inputs, this can be
      *   faster than copying the CDict's tables.
      *
+     * - The CDict's tables are not used at all, and instead we use the working
+     *   context alone to reload the dictionary and use params based on the source
+     *   size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
+     *   This method is effective when the dictionary sizes are very small relative
+     *   to the input size, and the input size is fairly large to begin with.
+     *
      * Zstd has a simple internal heuristic that selects which strategy to use
      * at the beginning of a compression. However, if experimentation shows that
      * Zstd is making poor choices, it is possible to override that choice with
@@ -1150,6 +1187,7 @@
     ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
     ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */
     ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
+    ZSTD_dictForceLoad     = 3  /* Always reload the dictionary */
 } ZSTD_dictAttachPref_e;
 
 typedef enum {
@@ -1158,7 +1196,7 @@
                                *   levels will be compressed. */
   ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. Uncompressed literals will still be
                                *   emitted if Huffman compression is not profitable. */
-  ZSTD_lcm_uncompressed = 2,  /**< Always emit uncompressed literals. */
+  ZSTD_lcm_uncompressed = 2   /**< Always emit uncompressed literals. */
 } ZSTD_literalCompressionMode_e;
 
 
@@ -1210,20 +1248,38 @@
  *           or an error code (if srcSize is too small) */
 ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
 
+/*! ZSTD_getSequences() :
+ * Extract sequences from the sequence store.
+ * zc can be used to insert custom compression params.
+ * This function invokes ZSTD_compress2() internally to generate the sequences.
+ * @return : number of sequences extracted
+ */
+ZSTDLIB_API size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+    size_t outSeqsSize, const void* src, size_t srcSize);
+
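[Editor's note] A possible calling sequence (not part of the changeset) for the
new API declared above; the fixed 64-entry table is an arbitrary illustration,
and the experimental declarations require ZSTD_STATIC_LINKING_ONLY.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdio.h>

    int main(void) {
        const char src[] = "abcabcabcabcabcabc";
        ZSTD_Sequence seqs[64];
        ZSTD_CCtx* const zc = ZSTD_createCCtx();
        size_t const n = ZSTD_getSequences(zc, seqs, 64, src, sizeof(src) - 1);
        if (!ZSTD_isError(n)) {
            for (size_t i = 0; i < n; i++)
                printf("lit=%u match=%u off=%u\n",
                       seqs[i].litLength, seqs[i].matchLength, seqs[i].offset);
        }
        ZSTD_freeCCtx(zc);
        return 0;
    }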
 
 /***************************************
 *  Memory management
 ***************************************/
 
 /*! ZSTD_estimate*() :
- *  These functions make it possible to estimate memory usage
- *  of a future {D,C}Ctx, before its creation.
- *  ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
- *  It will also consider src size to be arbitrarily "large", which is worst case.
- *  If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
- *  ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
- *  ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
- *  Note : CCtx size estimation is only correct for single-threaded compression. */
+ *  These functions make it possible to estimate memory usage of a future
+ *  {D,C}Ctx, before its creation.
+ *
+ *  ZSTD_estimateCCtxSize() will provide a budget large enough for any
+ *  compression level up to selected one. Unlike ZSTD_estimateCStreamSize*(),
+ *  this estimate does not include space for a window buffer, so this estimate
+ *  is guaranteed to be enough for single-shot compressions, but not streaming
+ *  compressions. It will however assume the input may be arbitrarily large,
+ *  which is the worst case. If srcSize is known to always be small,
+ *  ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
+ *  ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with
+ *  ZSTD_getCParams() to create cParams from compressionLevel.
+ *  ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with
+ *  ZSTD_CCtxParams_setParameter().
+ *
+ *  Note: only single-threaded compression is supported. This function will
+ *  return an error code if ZSTD_c_nbWorkers is >= 1. */
 ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
 ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
 ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
@@ -1334,7 +1390,8 @@
  *  Create a digested dictionary for compression
  *  Dictionary content is just referenced, not duplicated.
  *  As a consequence, `dictBuffer` **must** outlive CDict,
- *  and its content must remain unmodified throughout the lifetime of CDict. */
+ *  and its content must remain unmodified throughout the lifetime of CDict.
+ *  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
 ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
 
 /*! ZSTD_getCParams() :
@@ -1361,7 +1418,9 @@
 ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
 
 /*! ZSTD_compress_advanced() :
- *  Same as ZSTD_compress_usingDict(), with fine-tune control over compression parameters (by structure) */
+ *  Note : this function is now DEPRECATED.
+ *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
+ *  This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
 ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
                                           void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
@@ -1369,7 +1428,9 @@
                                           ZSTD_parameters params);
 
 /*! ZSTD_compress_usingCDict_advanced() :
- *  Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
+ *  Note : this function is now REDUNDANT.
+ *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
+ *  This prototype will be marked as deprecated and generate compilation warning in some future version */
 ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                               void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize,
@@ -1441,6 +1502,12 @@
  * There is no guarantee on compressed block size (default:0) */
 #define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
 
+/* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size,
+ * but compression ratio may regress significantly if the guess considerably underestimates */
+#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
+
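[Editor's note] A hedged sketch (not part of the changeset) of feeding the new
hint through the regular parameter setter; the 1 MiB figure is arbitrary, and
the ZSTD_c_srcSizeHint name needs ZSTD_STATIC_LINKING_ONLY.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static ZSTD_CCtx* make_hinted_cctx(void) {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        if (cctx != NULL) {
            /* inputs are expected to be around 1 MiB */
            size_t const err =
                ZSTD_CCtx_setParameter(cctx, ZSTD_c_srcSizeHint, 1 << 20);
            if (ZSTD_isError(err)) { ZSTD_freeCCtx(cctx); return NULL; }
        }
        return cctx;
    }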
 /*! ZSTD_CCtx_getParameter() :
  *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
  *  and store it into int* value.
@@ -1613,8 +1680,13 @@
  * pledgedSrcSize must be correct. If it is not known at init time, use
  * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
  * "0" also disables frame content size field. It may be enabled in the future.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
-ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
+ZSTDLIB_API size_t
+ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
+                         int compressionLevel,
+                         unsigned long long pledgedSrcSize);
+
 /**! ZSTD_initCStream_usingDict() :
  * This function is deprecated, and is equivalent to:
  *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
@@ -1623,42 +1695,66 @@
  *
  * Creates of an internal CDict (incompatible with static CCtx), except if
  * dict == NULL or dictSize < 8, in which case no dict is used.
- * Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
+ * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
  * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
-ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t
+ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
+                     const void* dict, size_t dictSize,
+                           int compressionLevel);
+
 /**! ZSTD_initCStream_advanced() :
  * This function is deprecated, and is approximately equivalent to:
  *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- *     ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
+ *     // Pseudocode: Set each zstd parameter and leave the rest as-is.
+ *     for ((param, value) : params) {
+ *         ZSTD_CCtx_setParameter(zcs, param, value);
+ *     }
  *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
  *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
  *
- * pledgedSrcSize must be correct. If srcSize is not known at init time, use
- * value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
+ * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
+ * pledgedSrcSize must be correct.
+ * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
-ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
-                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);
+ZSTDLIB_API size_t
+ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+                    const void* dict, size_t dictSize,
+                          ZSTD_parameters params,
+                          unsigned long long pledgedSrcSize);
+
 /**! ZSTD_initCStream_usingCDict() :
  * This function is deprecated, and equivalent to:
  *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
  *     ZSTD_CCtx_refCDict(zcs, cdict);
  *
  * note : cdict will just be referenced, and must outlive compression session
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
 ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+
 /**! ZSTD_initCStream_usingCDict_advanced() :
- * This function is deprecated, and is approximately equivalent to:
+ *   This function is DEPRECATED, and is approximately equivalent to:
  *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- *     ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
+ *     // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
+ *     for ((fParam, value) : fParams) {
+ *         ZSTD_CCtx_setParameter(zcs, fParam, value);
+ *     }
  *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
  *     ZSTD_CCtx_refCDict(zcs, cdict);
  *
  * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
  * pledgedSrcSize must be correct. If srcSize is not known at init time, use
  * value ZSTD_CONTENTSIZE_UNKNOWN.
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
+ZSTDLIB_API size_t
+ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+                               const ZSTD_CDict* cdict,
+                                     ZSTD_frameParameters fParams,
+                                     unsigned long long pledgedSrcSize);
 
 /*! ZSTD_resetCStream() :
  * This function is deprecated, and is equivalent to:
@@ -1673,6 +1769,7 @@
  *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
  *  but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
  * @return : 0, or an error code (which can be tested using ZSTD_isError())
+ *  Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
 ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
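[Editor's note] A sketch (not part of the changeset) of the replacement pattern
these deprecation notes describe for ZSTD_initCStream_srcSize(); ZSTD_CStream is
a typedef of ZSTD_CCtx, so the ZSTD_CCtx_* setters apply to it directly.

    #include <zstd.h>

    static size_t init_for_known_size(ZSTD_CStream* zcs, int level,
                                      unsigned long long srcSize) {
        size_t err = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        err = ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, level);
        if (ZSTD_isError(err)) return err;
        return ZSTD_CCtx_setPledgedSrcSize(zcs, srcSize);
    }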
 
@@ -1718,8 +1815,10 @@
  *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
  *
  * note: no dictionary will be used if dict == NULL or dictSize < 8
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
 ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+
 /**
  * This function is deprecated, and is equivalent to:
  *
@@ -1727,14 +1826,17 @@
  *     ZSTD_DCtx_refDDict(zds, ddict);
  *
  * note : ddict is referenced, it must outlive decompression session
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
 ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+
 /**
  * This function is deprecated, and is equivalent to:
  *
  *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
  *
  * re-use decompression parameters from previous init; saves dictionary loading
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
  */
 ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
 
@@ -1908,7 +2010,7 @@
 
 /*!
     Block functions produce and decode raw zstd blocks, without frame metadata.
-    Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+    Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
     But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.
 
     A few rules to respect :
--- a/contrib/python3-ratchet.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/python3-ratchet.py	Tue Jan 21 13:14:51 2020 -0500
@@ -60,7 +60,7 @@
     )
     p.add_argument(
         '-j',
-        default=os.sysconf(r'SC_NPROCESSORS_ONLN'),
+        default=os.sysconf('SC_NPROCESSORS_ONLN'),
         type=int,
         help='Number of parallel tests to run.',
     )
--- a/contrib/relnotes	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/relnotes	Tue Jan 21 13:14:51 2020 -0500
@@ -98,6 +98,7 @@
     (r"shelve|unshelve", "extensions"),
 ]
 
+
 def wikify(desc):
     desc = desc.replace("(issue", "(Bts:issue")
     desc = re.sub(r"\b([0-9a-f]{12})\b", r"Cset:\1", desc)
@@ -107,6 +108,7 @@
     desc = re.sub(r"\b(\S*__\S*)\b", r"`\1`", desc)
     return desc
 
+
 def main():
     desc = "example: %(prog)s 4.7.2 --stoprev 4.8rc0"
     ap = argparse.ArgumentParser(description=desc)
@@ -200,5 +202,6 @@
     for d in sorted(apis):
         print(" * %s" % d)
 
+
 if __name__ == "__main__":
     main()
--- a/contrib/simplemerge	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/simplemerge	Tue Jan 21 13:14:51 2020 -0500
@@ -5,6 +5,7 @@
 import sys
 
 import hgdemandimport
+
 hgdemandimport.enable()
 
 from mercurial.i18n import _
@@ -16,44 +17,54 @@
     simplemerge,
     ui as uimod,
 )
-from mercurial.utils import (
-    procutil,
-    stringutil
-)
+from mercurial.utils import procutil, stringutil
 
-options = [(b'L', b'label', [], _(b'labels to use on conflict markers')),
-           (b'a', b'text', None, _(b'treat all files as text')),
-           (b'p', b'print', None,
-            _(b'print results instead of overwriting LOCAL')),
-           (b'', b'no-minimal', None, _(b'no effect (DEPRECATED)')),
-           (b'h', b'help', None, _(b'display help and exit')),
-           (b'q', b'quiet', None, _(b'suppress output'))]
+options = [
+    (b'L', b'label', [], _(b'labels to use on conflict markers')),
+    (b'a', b'text', None, _(b'treat all files as text')),
+    (b'p', b'print', None, _(b'print results instead of overwriting LOCAL')),
+    (b'', b'no-minimal', None, _(b'no effect (DEPRECATED)')),
+    (b'h', b'help', None, _(b'display help and exit')),
+    (b'q', b'quiet', None, _(b'suppress output')),
+]
 
-usage = _(b'''simplemerge [OPTS] LOCAL BASE OTHER
+usage = _(
+    b'''simplemerge [OPTS] LOCAL BASE OTHER
 
     Simple three-way file merge utility with a minimal feature set.
 
     Apply to LOCAL the changes necessary to go from BASE to OTHER.
 
     By default, LOCAL is overwritten with the results of this operation.
-''')
+'''
+)
+
 
 class ParseError(Exception):
     """Exception raised on errors in parsing the command line."""
 
+
 def showhelp():
     pycompat.stdout.write(usage)
     pycompat.stdout.write(b'\noptions:\n')
 
     out_opts = []
     for shortopt, longopt, default, desc in options:
-        out_opts.append((b'%2s%s' % (shortopt and b'-%s' % shortopt,
-                                     longopt and b' --%s' % longopt),
-                         b'%s' % desc))
+        out_opts.append(
+            (
+                b'%2s%s'
+                % (
+                    shortopt and b'-%s' % shortopt,
+                    longopt and b' --%s' % longopt,
+                ),
+                b'%s' % desc,
+            )
+        )
     opts_len = max([len(opt[0]) for opt in out_opts])
     for first, second in out_opts:
         pycompat.stdout.write(b' %-*s  %s\n' % (opts_len, first, second))
 
+
 try:
     for fp in (sys.stdin, pycompat.stdout, sys.stderr):
         procutil.setbinary(fp)
@@ -68,13 +79,17 @@
         showhelp()
         sys.exit(0)
     if len(args) != 3:
-            raise ParseError(_(b'wrong number of arguments').decode('utf8'))
+        raise ParseError(_(b'wrong number of arguments').decode('utf8'))
     local, base, other = args
-    sys.exit(simplemerge.simplemerge(uimod.ui.load(),
-                                     context.arbitraryfilectx(local),
-                                     context.arbitraryfilectx(base),
-                                     context.arbitraryfilectx(other),
-                                     **pycompat.strkwargs(opts)))
+    sys.exit(
+        simplemerge.simplemerge(
+            uimod.ui.load(),
+            context.arbitraryfilectx(local),
+            context.arbitraryfilectx(base),
+            context.arbitraryfilectx(other),
+            **pycompat.strkwargs(opts)
+        )
+    )
 except ParseError as e:
     e = stringutil.forcebytestr(e)
     pycompat.stdout.write(b"%s: %s\n" % (sys.argv[0].encode('utf8'), e))
--- a/contrib/testparseutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/testparseutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -80,7 +80,7 @@
 ####################
 
 
-class embeddedmatcher(object):
+class embeddedmatcher(object):  # pytype: disable=ignored-metaclass
     """Base class to detect embedded code fragments in *.t test script
     """
 
@@ -331,9 +331,9 @@
         )
         self._fileres = [
             # "cat > NAME << LIMIT" case
-            re.compile(r'  \$ \s*cat' + namepat + heredoclimitpat),
+            re.compile(r' {2}\$ \s*cat' + namepat + heredoclimitpat),
             # "cat << LIMIT > NAME" case
-            re.compile(r'  \$ \s*cat' + heredoclimitpat + namepat),
+            re.compile(r' {2}\$ \s*cat' + heredoclimitpat + namepat),
         ]
 
     def startsat(self, line):
@@ -426,7 +426,7 @@
     """
 
     _prefix = '  >>> '
-    _prefixre = re.compile(r'  (>>>|\.\.\.) ')
+    _prefixre = re.compile(r' {2}(>>>|\.\.\.) ')
 
     # If a line matches against not _prefixre but _outputre, that line
     # is "an expected output line" (= not a part of code fragment).
@@ -436,7 +436,7 @@
     # run-tests.py. But "directive line inside inline python code"
     # should be rejected by Mercurial reviewers. Therefore, this
     # regexp does not match against such directive lines.
-    _outputre = re.compile(r'  $|  [^$]')
+    _outputre = re.compile(r' {2}$| {2}[^$]')
 
     def __init__(self):
         super(pydoctestmatcher, self).__init__("doctest style python code")
@@ -509,7 +509,7 @@
     _prefix = '  > '
 
     _startre = re.compile(
-        r'  \$ (\$PYTHON|"\$PYTHON"|python).*' + heredoclimitpat
+        r' {2}\$ (\$PYTHON|"\$PYTHON"|python).*' + heredoclimitpat
     )
 
     def __init__(self):
--- a/contrib/undumprevlog	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/undumprevlog	Tue Jan 21 13:14:51 2020 -0500
@@ -14,16 +14,15 @@
     transaction,
     vfs as vfsmod,
 )
-from mercurial.utils import (
-    procutil,
-)
+from mercurial.utils import procutil
 
 for fp in (sys.stdin, sys.stdout, sys.stderr):
     procutil.setbinary(fp)
 
 opener = vfsmod.vfs(b'.', False)
-tr = transaction.transaction(sys.stderr.write, opener, {b'store': opener},
-                             b"undump.journal")
+tr = transaction.transaction(
+    sys.stderr.write, opener, {b'store': opener}, b"undump.journal"
+)
 while True:
     l = sys.stdin.readline()
     if not l:
@@ -42,9 +41,9 @@
         p2 = node.bin(p[1])
     elif l.startswith("length:"):
         length = int(l[8:-1])
-        sys.stdin.readline() # start marker
+        sys.stdin.readline()  # start marker
         d = encoding.strtolocal(sys.stdin.read(length))
-        sys.stdin.readline() # end marker
+        sys.stdin.readline()  # end marker
         r.addrevision(d, tr, lr, p1, p2)
 
 tr.close()
--- a/contrib/vagrant/Vagrantfile	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/vagrant/Vagrantfile	Tue Jan 21 13:14:51 2020 -0500
@@ -1,8 +1,8 @@
 # -*- mode: ruby -*-
 
 Vagrant.configure('2') do |config|
-  # Debian 8.1 x86_64 without configuration management software
-  config.vm.box = "debian/jessie64"
+  # Debian 10.1 x86_64 without configuration management software
+  config.vm.box = "debian/buster64"
   config.vm.hostname = "tests"
 
   config.vm.define "tests" do |conf|
--- a/contrib/win32/ReadMe.html	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/win32/ReadMe.html	Tue Jan 21 13:14:51 2020 -0500
@@ -140,8 +140,7 @@
     </p>
 
     <p>
-      Mercurial is Copyright 2005-2019 Matt Mackall and others. See
-      the <tt>Contributors.txt</tt> file for a list of contributors.
+      Mercurial is Copyright 2005-2019 Matt Mackall and others.
     </p>
 
     <p>
--- a/contrib/win32/mercurial.ini	Thu Jan 09 14:19:20 2020 -0500
+++ b/contrib/win32/mercurial.ini	Tue Jan 21 13:14:51 2020 -0500
@@ -16,7 +16,7 @@
 
 [ui]
 ; editor used to enter commit logs, etc.  Most text editors will work.
-editor = notepad
+; editor = notepad
 ; show changed files and be a bit more verbose if True
 ; verbose = True
 ; colorize commands output
--- a/doc/Makefile	Thu Jan 09 14:19:20 2020 -0500
+++ b/doc/Makefile	Tue Jan 21 13:14:51 2020 -0500
@@ -1,8 +1,8 @@
-SOURCES=$(notdir $(wildcard ../mercurial/help/*.[0-9].txt))
+SOURCES=$(notdir $(wildcard ../mercurial/helptext/*.[0-9].txt))
 MAN=$(SOURCES:%.txt=%)
 HTML=$(SOURCES:%.txt=%.html)
 GENDOC=gendoc.py ../mercurial/commands.py ../mercurial/help.py \
-	../mercurial/help/*.txt ../hgext/*.py ../hgext/*/__init__.py
+	../mercurial/helptext/*.txt ../hgext/*.py ../hgext/*/__init__.py
 PREFIX=/usr/local
 MANDIR=$(PREFIX)/share/man
 INSTALL=install -c -m 644
--- a/doc/docchecker	Thu Jan 09 14:19:20 2020 -0500
+++ b/doc/docchecker	Tue Jan 21 13:14:51 2020 -0500
@@ -15,6 +15,7 @@
 
 try:
     import msvcrt
+
     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
 except ImportError:
@@ -25,14 +26,18 @@
 leadingline = re.compile(br'(^\s*)(\S.*)$')
 
 checks = [
-  (br""":hg:`[^`]*'[^`]*`""",
-   b"""warning: please avoid nesting ' in :hg:`...`"""),
-  (br'\w:hg:`',
-   b'warning: please have a space before :hg:'),
-  (br"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""",
-   b'''warning: please use " instead of ' for hg ... "..."'''),
+    (
+        br""":hg:`[^`]*'[^`]*`""",
+        b"""warning: please avoid nesting ' in :hg:`...`""",
+    ),
+    (br'\w:hg:`', b'warning: please have a space before :hg:'),
+    (
+        br"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""",
+        b'''warning: please use " instead of ' for hg ... "..."''',
+    ),
 ]
 
+
 def check(line):
     messages = []
     for match, msg in checks:
@@ -43,6 +48,7 @@
         for msg in messages:
             stdout.write(b'%s\n' % msg)
 
+
 def work(file):
     (llead, lline) = (b'', b'')
 
@@ -55,8 +61,8 @@
             continue
 
         lead, line = match.group(1), match.group(2)
-        if (lead == llead):
-            if (lline != b''):
+        if lead == llead:
+            if lline != b'':
                 lline += b' ' + line
             else:
                 lline = line
@@ -65,6 +71,7 @@
             (llead, lline) = (lead, line)
     check(lline)
 
+
 def main():
     for f in sys.argv[1:]:
         try:
@@ -73,4 +80,5 @@
         except BaseException as e:
             sys.stdout.write(r"failed to process %s: %s\n" % (f, e))
 
+
 main()
--- a/doc/gendoc.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/doc/gendoc.py	Tue Jan 21 13:14:51 2020 -0500
@@ -20,17 +20,13 @@
 
 # This script is executed during installs and may not have C extensions
 # available. Relax C module requirements.
-os.environ[r'HGMODULEPOLICY'] = r'allow'
+os.environ['HGMODULEPOLICY'] = 'allow'
 # import from the live mercurial repo
-sys.path.insert(0, r"..")
+sys.path.insert(0, "..")
 from mercurial import demandimport
 
 demandimport.enable()
-# Load util so that the locale path is set by i18n.setdatapath() before
-# calling _().
-from mercurial import util
 
-util.datapath
 from mercurial import (
     commands,
     encoding,
--- a/doc/runrst	Thu Jan 09 14:19:20 2020 -0500
+++ b/doc/runrst	Tue Jan 21 13:14:51 2020 -0500
@@ -15,20 +15,25 @@
 from __future__ import absolute_import
 
 import sys
+
 try:
     import docutils.core as core
     import docutils.nodes as nodes
     import docutils.utils as utils
     import docutils.parsers.rst.roles as roles
 except ImportError:
-    sys.stderr.write("abort: couldn't generate documentation: docutils "
-                     "module is missing\n")
-    sys.stderr.write("please install python-docutils or see "
-                     "http://docutils.sourceforge.net/\n")
+    sys.stderr.write(
+        "abort: couldn't generate documentation: docutils "
+        "module is missing\n"
+    )
+    sys.stderr.write(
+        "please install python-docutils or see "
+        "http://docutils.sourceforge.net/\n"
+    )
     sys.exit(-1)
 
-def role_hg(name, rawtext, text, lineno, inliner,
-            options={}, content=[]):
+
+def role_hg(name, rawtext, text, lineno, inliner, options=None, content=None):
     text = "hg " + utils.unescape(text)
     linktext = nodes.literal(rawtext, text)
     parts = text.split()
@@ -47,10 +52,10 @@
             refuri = "hg.1.html#%s" % args[1]
         else:
             refuri = "hg.1.html#%s" % args[0]
-    node = nodes.reference(rawtext, '', linktext,
-                           refuri=refuri)
+    node = nodes.reference(rawtext, '', linktext, refuri=refuri)
     return [node], []
 
+
 roles.register_local_role("hg", role_hg)
 
 if __name__ == "__main__":
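
One behavioral detail in the role_hg rewrite beyond formatting: the old signature used mutable default arguments (options={}, content=[]), which Python evaluates once at definition time and shares across every call; defaulting to None sidesteps that. A standalone illustration of the pitfall (generic Python, not runrst code):

    def bad(item, seen=[]):        # one list, shared by every call
        seen.append(item)
        return seen

    def good(item, seen=None):     # fresh list per call unless supplied
        if seen is None:
            seen = []
        seen.append(item)
        return seen

    assert bad(1) == [1]
    assert bad(2) == [1, 2]        # state leaked from the first call
    assert good(1) == [1]
    assert good(2) == [2]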
--- a/hg	Thu Jan 09 14:19:20 2020 -0500
+++ b/hg	Tue Jan 21 13:14:51 2020 -0500
@@ -15,22 +15,29 @@
 
 if libdir != '@' 'LIBDIR' '@':
     if not os.path.isabs(libdir):
-        libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
-                              libdir)
+        libdir = os.path.join(
+            os.path.dirname(os.path.realpath(__file__)), libdir
+        )
         libdir = os.path.abspath(libdir)
     sys.path.insert(0, libdir)
 
 from hgdemandimport import tracing
+
 with tracing.log('hg script'):
     # enable importing on demand to reduce startup time
     try:
         if sys.version_info[0] < 3 or sys.version_info >= (3, 6):
-            import hgdemandimport; hgdemandimport.enable()
+            import hgdemandimport
+
+            hgdemandimport.enable()
     except ImportError:
-        sys.stderr.write("abort: couldn't find mercurial libraries in [%s]\n" %
-                         ' '.join(sys.path))
+        sys.stderr.write(
+            "abort: couldn't find mercurial libraries in [%s]\n"
+            % ' '.join(sys.path)
+        )
         sys.stderr.write("(check your install and PYTHONPATH)\n")
         sys.exit(-1)
 
     from mercurial import dispatch
+
     dispatch.run()
--- a/hgdemandimport/demandimportpy2.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgdemandimport/demandimportpy2.py	Tue Jan 21 13:14:51 2020 -0500
@@ -70,9 +70,9 @@
             head = name
             after = []
         object.__setattr__(
-            self, r"_data", (head, globals, locals, after, level, set())
+            self, "_data", (head, globals, locals, after, level, set())
         )
-        object.__setattr__(self, r"_module", None)
+        object.__setattr__(self, "_module", None)
 
     def _extend(self, name):
         """add to the list of submodules to load"""
@@ -135,15 +135,15 @@
                 if locals:
                     if locals.get(head) is self:
                         locals[head] = mod
-                    elif locals.get(head + r'mod') is self:
-                        locals[head + r'mod'] = mod
+                    elif locals.get(head + 'mod') is self:
+                        locals[head + 'mod'] = mod
 
                 for modname in modrefs:
                     modref = sys.modules.get(modname, None)
                     if modref and getattr(modref, head, None) is self:
                         setattr(modref, head, mod)
 
-                object.__setattr__(self, r"_module", mod)
+                object.__setattr__(self, "_module", mod)
 
     def __repr__(self):
         if self._module:
@@ -303,18 +303,18 @@
 
 
 def enable():
-    "enable global demand-loading of modules"
+    """enable global demand-loading of modules"""
     builtins.__import__ = _demandimport
 
 
 def disable():
-    "disable global demand-loading of modules"
+    """disable global demand-loading of modules"""
     builtins.__import__ = _origimport
 
 
 @contextmanager
 def deactivated():
-    "context manager for disabling demandimport in 'with' blocks"
+    """context manager for disabling demandimport in 'with' blocks"""
     demandenabled = isenabled()
     if demandenabled:
         disable()
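
The quoting changes here are purely stylistic (the formatter prefers triple-quoted docstrings), but the deactivated() helper being documented deserves a usage sketch: it temporarily switches demand-loading off so modules with import-time side effects load eagerly, then restores the previous state. Assuming the module is reached via mercurial.demandimport, as gendoc.py does elsewhere in this changeset:

    from mercurial import demandimport

    demandimport.enable()          # imports become lazy from here on

    with demandimport.deactivated():
        # modules imported in this block load eagerly, which matters
        # when importing has side effects (registering codecs, etc.)
        import json                # illustrative module choice

    # demand-loading is active again here if it was active before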
--- a/hgdemandimport/demandimportpy3.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgdemandimport/demandimportpy3.py	Tue Jan 21 13:14:51 2020 -0500
@@ -27,8 +27,6 @@
 from __future__ import absolute_import
 
 import contextlib
-import importlib.abc
-import importlib.machinery
 import importlib.util
 import sys
 
@@ -36,6 +34,12 @@
 
 _deactivated = False
 
+# Python 3.5's LazyLoader doesn't work for some reason.
+# https://bugs.python.org/issue26186 is a known issue with extension
+# importing. But it appears to not have a meaningful effect with
+# Mercurial.
+_supported = sys.version_info[0:2] >= (3, 6)
+
 
 class _lazyloaderex(importlib.util.LazyLoader):
     """This is a LazyLoader except it also follows the _deactivated global and
@@ -51,29 +55,61 @@
                 super().exec_module(module)
 
 
-# This is 3.6+ because with Python 3.5 it isn't possible to lazily load
-# extensions. See the discussion in https://bugs.python.org/issue26186 for more.
-if sys.version_info[0:2] >= (3, 6):
-    _extensions_loader = _lazyloaderex.factory(
-        importlib.machinery.ExtensionFileLoader
-    )
-else:
-    _extensions_loader = importlib.machinery.ExtensionFileLoader
+class LazyFinder(object):
+    """A wrapper around a ``MetaPathFinder`` that makes loaders lazy.
+
+    ``sys.meta_path`` finders have their ``find_spec()`` called to locate a
+    module. This returns a ``ModuleSpec`` if found or ``None``. The
+    ``ModuleSpec`` has a ``loader`` attribute, which is called to actually
+    load a module.
+
+    Our class wraps an existing finder and overloads its ``find_spec()`` to
+    replace the ``loader`` with our lazy loader proxy.
+
+    We have to use __getattribute__ to proxy the instance because some meta
+    path finders don't support monkeypatching.
+    """
+
+    __slots__ = ("_finder",)
+
+    def __init__(self, finder):
+        object.__setattr__(self, "_finder", finder)
+
+    def __repr__(self):
+        return "<LazyFinder for %r>" % object.__getattribute__(self, "_finder")
+
+    # __bool__ is canonical Python 3. But check-code insists on __nonzero__ being
+    # defined via `def`.
+    def __nonzero__(self):
+        return bool(object.__getattribute__(self, "_finder"))
 
-_bytecode_loader = _lazyloaderex.factory(
-    importlib.machinery.SourcelessFileLoader
-)
-_source_loader = _lazyloaderex.factory(importlib.machinery.SourceFileLoader)
+    __bool__ = __nonzero__
+
+    def __getattribute__(self, name):
+        if name in ("_finder", "find_spec"):
+            return object.__getattribute__(self, name)
 
+        return getattr(object.__getattribute__(self, "_finder"), name)
+
+    def __delattr__(self, name):
+        return delattr(object.__getattribute__(self, "_finder"), name)
+
+    def __setattr__(self, name, value):
+        return setattr(object.__getattribute__(self, "_finder"), name, value)
 
-def _makefinder(path):
-    return importlib.machinery.FileFinder(
-        path,
-        # This is the order in which loaders are passed in in core Python.
-        (_extensions_loader, importlib.machinery.EXTENSION_SUFFIXES),
-        (_source_loader, importlib.machinery.SOURCE_SUFFIXES),
-        (_bytecode_loader, importlib.machinery.BYTECODE_SUFFIXES),
-    )
+    def find_spec(self, *args, **kwargs):
+        finder = object.__getattribute__(self, "_finder")
+        spec = finder.find_spec(*args, **kwargs)
+
+        # Lazy loader requires exec_module().
+        if (
+            spec is not None
+            and spec.loader is not None
+            and getattr(spec.loader, "exec_module", None)
+        ):
+            spec.loader = _lazyloaderex(spec.loader)
+
+        return spec
 
 
 ignores = set()
@@ -85,19 +121,30 @@
 
 
 def isenabled():
-    return _makefinder in sys.path_hooks and not _deactivated
+    return not _deactivated and any(
+        isinstance(finder, LazyFinder) for finder in sys.meta_path
+    )
 
 
 def disable():
-    try:
-        while True:
-            sys.path_hooks.remove(_makefinder)
-    except ValueError:
-        pass
+    new_finders = []
+    for finder in sys.meta_path:
+        new_finders.append(
+            finder._finder if isinstance(finder, LazyFinder) else finder
+        )
+    sys.meta_path[:] = new_finders
 
 
 def enable():
-    sys.path_hooks.insert(0, _makefinder)
+    if not _supported:
+        return
+
+    new_finders = []
+    for finder in sys.meta_path:
+        new_finders.append(
+            LazyFinder(finder) if not isinstance(finder, LazyFinder) else finder
+        )
+    sys.meta_path[:] = new_finders
 
 
 @contextlib.contextmanager
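
This hunk is the heart of the change: rather than installing a sys.path_hooks entry, every finder already on sys.meta_path is wrapped so that the ModuleSpec it returns carries a lazy loader, deferring exec_module() until first attribute access. A condensed sketch of the same mechanism in plain stdlib terms (a simplification of the class above, without its check-code and monkeypatching accommodations):

    import importlib.util
    import sys

    class _LazySpecFinder(object):
        """Proxy a meta path finder so the specs it returns load lazily."""

        def __init__(self, finder):
            self._finder = finder

        def find_spec(self, *args, **kwargs):
            find = getattr(self._finder, "find_spec", None)
            if find is None:       # legacy finder without find_spec()
                return None
            spec = find(*args, **kwargs)
            # LazyLoader requires a loader implementing exec_module()
            if spec is not None and getattr(spec.loader, "exec_module", None):
                spec.loader = importlib.util.LazyLoader(spec.loader)
            return spec

        def __getattr__(self, name):   # delegate everything else
            return getattr(self._finder, name)

    def enable_lazy_imports():
        sys.meta_path[:] = [
            f if isinstance(f, _LazySpecFinder) else _LazySpecFinder(f)
            for f in sys.meta_path
        ]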
--- a/hgext/absorb.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/absorb.py	Tue Jan 21 13:14:51 2020 -0500
@@ -511,7 +511,7 @@
         if not editedtext:
             raise error.Abort(_(b'empty editor text'))
         # parse edited result
-        contents = [b'' for i in self.fctxs]
+        contents = [b''] * len(self.fctxs)
         leftpadpos = 4
         colonpos = leftpadpos + len(visiblefctxs) + 1
         for l in mdiff.splitnewlines(editedtext):
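
The switch to sequence repetition is safe here because bytes objects are immutable; repetition copies references, so the same rewrite would be wrong for mutable elements. Illustration (generic Python, not absorb code):

    rows = [[]] * 3                  # three references to one list
    rows[0].append(1)
    assert rows == [[1], [1], [1]]

    rows = [[] for _ in range(3)]    # three distinct lists
    rows[0].append(1)
    assert rows == [[1], [], []]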
--- a/hgext/acl.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/acl.py	Tue Jan 21 13:14:51 2020 -0500
@@ -369,8 +369,8 @@
         return
 
     user = None
-    if source == b'serve' and r'url' in kwargs:
-        url = kwargs[r'url'].split(b':')
+    if source == b'serve' and 'url' in kwargs:
+        url = kwargs['url'].split(b':')
         if url[0] == b'remote' and url[1].startswith(b'http'):
             user = urlreq.unquote(url[3])
 
@@ -386,9 +386,9 @@
 
 
 def _pkhook(ui, repo, hooktype, node, source, user, **kwargs):
-    if kwargs[r'namespace'] == b'bookmarks':
-        bookmark = kwargs[r'key']
-        ctx = kwargs[r'new']
+    if kwargs['namespace'] == b'bookmarks':
+        bookmark = kwargs['key']
+        ctx = kwargs['new']
         allowbookmarks = buildmatch(ui, None, user, b'acl.allow.bookmarks')
         denybookmarks = buildmatch(ui, None, user, b'acl.deny.bookmarks')
 
--- a/hgext/beautifygraph.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/beautifygraph.py	Tue Jan 21 13:14:51 2020 -0500
@@ -94,7 +94,7 @@
         ui.warn(_(b'beautifygraph: unsupported encoding, UTF-8 required\n'))
         return
 
-    if r'A' in encoding._wide:
+    if 'A' in encoding._wide:
         ui.warn(
             _(
                 b'beautifygraph: unsupported terminal settings, '
--- a/hgext/blackbox.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/blackbox.py	Tue Jan 21 13:14:51 2020 -0500
@@ -201,7 +201,7 @@
     if not repo.vfs.exists(b'blackbox.log'):
         return
 
-    limit = opts.get(r'limit')
+    limit = opts.get('limit')
     fp = repo.vfs(b'blackbox.log', b'r')
     lines = fp.read().split(b'\n')
 
--- a/hgext/bookflow.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/bookflow.py	Tue Jan 21 13:14:51 2020 -0500
@@ -101,7 +101,7 @@
 
 
 def commands_branch(orig, ui, repo, label=None, **opts):
-    if label and not opts.get(r'clean') and not opts.get(r'rev'):
+    if label and not opts.get('clean') and not opts.get('rev'):
         raise error.Abort(
             _(
                 b"creating named branches is disabled and you should use bookmarks"
--- a/hgext/bugzilla.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/bugzilla.py	Tue Jan 21 13:14:51 2020 -0500
@@ -612,7 +612,7 @@
             self.ui.warn(_(b"Bugzilla/MySQL cannot update bug state\n"))
 
         (user, userid) = self.get_bugzilla_user(committer)
-        now = time.strftime(r'%Y-%m-%d %H:%M:%S')
+        now = time.strftime('%Y-%m-%d %H:%M:%S')
         self.run(
             '''insert into longdescs
                     (bug_id, who, bug_when, thetext)
@@ -1099,7 +1099,6 @@
         the given changeset in their comments.
         '''
         start = 0
-        hours = 0.0
         bugs = {}
         bugmatch = self.bug_re.search(ctx.description(), start)
         fixmatch = self.fix_re.search(ctx.description(), start)
--- a/hgext/censor.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/censor.py	Tue Jan 21 13:14:51 2020 -0500
@@ -23,6 +23,9 @@
 ``hg update``, must be capable of tolerating censored data to continue to
 function in a meaningful way. Such commands only tolerate censored file
 revisions if they are allowed by the "censor.policy=ignore" config option.
+
+A few informative commands such as ``hg grep`` will unconditionally
+ignore censored data and merely report that it was encountered.
 """
 
 from __future__ import absolute_import
--- a/hgext/churn.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/churn.py	Tue Jan 21 13:14:51 2020 -0500
@@ -197,7 +197,7 @@
         return s + b" " * (l - encoding.colwidth(s))
 
     amap = {}
-    aliases = opts.get(r'aliases')
+    aliases = opts.get('aliases')
     if not aliases and os.path.exists(repo.wjoin(b'.hgchurn')):
         aliases = repo.wjoin(b'.hgchurn')
     if aliases:
@@ -215,7 +215,7 @@
     if not rate:
         return
 
-    if opts.get(r'sort'):
+    if opts.get('sort'):
         rate.sort()
     else:
         rate.sort(key=lambda x: (-sum(x[1]), x))
@@ -228,7 +228,7 @@
     ui.debug(b"assuming %i character terminal\n" % ttywidth)
     width = ttywidth - maxname - 2 - 2 - 2
 
-    if opts.get(r'diffstat'):
+    if opts.get('diffstat'):
         width -= 15
 
         def format(name, diffstat):
--- a/hgext/commitextras.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/commitextras.py	Tue Jan 21 13:14:51 2020 -0500
@@ -58,7 +58,7 @@
 
     class repoextra(repo.__class__):
         def commit(self, *innerpats, **inneropts):
-            extras = opts.get(r'extra')
+            extras = opts.get('extra')
             for raw in extras:
                 if b'=' not in raw:
                     msg = _(
@@ -82,7 +82,7 @@
                         b"manually"
                     )
                     raise error.Abort(msg % k)
-                inneropts[r'extra'][k] = v
+                inneropts['extra'][k] = v
             return super(repoextra, self).commit(*innerpats, **inneropts)
 
     repo.__class__ = repoextra
--- a/hgext/convert/common.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/convert/common.py	Tue Jan 21 13:14:51 2020 -0500
@@ -57,7 +57,7 @@
 def shlexer(data=None, filepath=None, wordchars=None, whitespace=None):
     if data is None:
         if pycompat.ispy3:
-            data = open(filepath, b'r', encoding=r'latin1')
+            data = open(filepath, b'r', encoding='latin1')
         else:
             data = open(filepath, b'r')
     else:
@@ -493,7 +493,7 @@
         # POSIX requires at least 4096 bytes for ARG_MAX
         argmax = 4096
         try:
-            argmax = os.sysconf(r"SC_ARG_MAX")
+            argmax = os.sysconf("SC_ARG_MAX")
         except (AttributeError, ValueError):
             pass
 
--- a/hgext/convert/convcmd.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/convert/convcmd.py	Tue Jan 21 13:14:51 2020 -0500
@@ -56,6 +56,36 @@
 orig_encoding = b'ascii'
 
 
+def readauthormap(ui, authorfile, authors=None):
+    if authors is None:
+        authors = {}
+    with open(authorfile, b'rb') as afile:
+        for line in afile:
+
+            line = line.strip()
+            if not line or line.startswith(b'#'):
+                continue
+
+            try:
+                srcauthor, dstauthor = line.split(b'=', 1)
+            except ValueError:
+                msg = _(b'ignoring bad line in author map file %s: %s\n')
+                ui.warn(msg % (authorfile, line.rstrip()))
+                continue
+
+            srcauthor = srcauthor.strip()
+            dstauthor = dstauthor.strip()
+            if authors.get(srcauthor) in (None, dstauthor):
+                msg = _(b'mapping author %s to %s\n')
+                ui.debug(msg % (srcauthor, dstauthor))
+                authors[srcauthor] = dstauthor
+                continue
+
+            m = _(b'overriding mapping for author %s, was %s, will be %s\n')
+            ui.status(m % (srcauthor, authors[srcauthor], dstauthor))
+    return authors
+
+
 def recode(s):
     if isinstance(s, pycompat.unicode):
         return s.encode(pycompat.sysstr(orig_encoding), 'replace')
@@ -448,32 +478,7 @@
             ofile.close()
 
     def readauthormap(self, authorfile):
-        afile = open(authorfile, b'rb')
-        for line in afile:
-
-            line = line.strip()
-            if not line or line.startswith(b'#'):
-                continue
-
-            try:
-                srcauthor, dstauthor = line.split(b'=', 1)
-            except ValueError:
-                msg = _(b'ignoring bad line in author map file %s: %s\n')
-                self.ui.warn(msg % (authorfile, line.rstrip()))
-                continue
-
-            srcauthor = srcauthor.strip()
-            dstauthor = dstauthor.strip()
-            if self.authors.get(srcauthor) in (None, dstauthor):
-                msg = _(b'mapping author %s to %s\n')
-                self.ui.debug(msg % (srcauthor, dstauthor))
-                self.authors[srcauthor] = dstauthor
-                continue
-
-            m = _(b'overriding mapping for author %s, was %s, will be %s\n')
-            self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))
-
-        afile.close()
+        self.authors = readauthormap(self.ui, authorfile, self.authors)
 
     def cachecommit(self, rev):
         commit = self.source.getcommit(rev)
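
Hoisting readauthormap() to module level (the method now simply delegates) lets other code parse an author map without constructing a converter. The format is one "source author = destination author" mapping per line, with blank lines and '#' comments ignored. A usage sketch with a hypothetical path:

    from mercurial import ui as uimod
    from hgext.convert import convcmd

    u = uimod.ui.load()
    authors = convcmd.readauthormap(u, b'/path/to/authormap', authors={})
    # e.g. authors[b'jdoe'] == b'John Doe <jdoe@example.com>'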
--- a/hgext/convert/cvs.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/convert/cvs.py	Tue Jan 21 13:14:51 2020 -0500
@@ -144,9 +144,7 @@
 
         if root.startswith(b":pserver:"):
             root = root[9:]
-            m = re.match(
-                r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)', root
-            )
+            m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:/]*)(?::(\d*))?(.*)', root)
             if m:
                 conntype = b"pserver"
                 user, passw, serv, port, root = m.groups()
--- a/hgext/convert/cvsps.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/convert/cvsps.py	Tue Jan 21 13:14:51 2020 -0500
@@ -54,10 +54,8 @@
         self.__dict__.update(entries)
 
     def __repr__(self):
-        items = (
-            r"%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)
-        )
-        return r"%s(%s)" % (type(self).__name__, r", ".join(items))
+        items = ("%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__))
+        return "%s(%s)" % (type(self).__name__, ", ".join(items))
 
 
 class logerror(Exception):
@@ -112,7 +110,7 @@
     _scache = {}
 
     def scache(s):
-        b"return a shared version of a string"
+        """return a shared version of a string"""
         return _scache.setdefault(s, s)
 
     ui.status(_(b'collecting CVS rlog\n'))
@@ -713,7 +711,7 @@
     # Sort files in each changeset
 
     def entitycompare(l, r):
-        b'Mimic cvsps sorting order'
+        """Mimic cvsps sorting order"""
         l = l.file.split(b'/')
         r = r.file.split(b'/')
         nl = len(l)
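
The b"..." to """...""" conversions here (and in p4.py and hgk.py below) are fixes rather than pure restyling: on Python 3 a leading bytes literal is an ordinary expression statement, not a docstring, so __doc__ stays None. Minimal demonstration:

    def f():
        b"not a docstring on Python 3"

    def g():
        """a real docstring"""

    assert f.__doc__ is None
    assert g.__doc__ == "a real docstring"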
--- a/hgext/convert/gnuarch.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/convert/gnuarch.py	Tue Jan 21 13:14:51 2020 -0500
@@ -302,25 +302,25 @@
 
             # Commit date
             self.changes[rev].date = dateutil.datestr(
-                dateutil.strdate(catlog[r'Standard-date'], b'%Y-%m-%d %H:%M:%S')
+                dateutil.strdate(catlog['Standard-date'], b'%Y-%m-%d %H:%M:%S')
             )
 
             # Commit author
-            self.changes[rev].author = self.recode(catlog[r'Creator'])
+            self.changes[rev].author = self.recode(catlog['Creator'])
 
             # Commit description
             self.changes[rev].summary = b'\n\n'.join(
                 (
-                    self.recode(catlog[r'Summary']),
+                    self.recode(catlog['Summary']),
                     self.recode(catlog.get_payload()),
                 )
             )
             self.changes[rev].summary = self.recode(self.changes[rev].summary)
 
             # Commit revision origin when dealing with a branch or tag
-            if r'Continuation-of' in catlog:
+            if 'Continuation-of' in catlog:
                 self.changes[rev].continuationof = self.recode(
-                    catlog[r'Continuation-of']
+                    catlog['Continuation-of']
                 )
         except Exception:
             raise error.Abort(_(b'could not parse cat-log of %s') % rev)
--- a/hgext/convert/monotone.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/convert/monotone.py	Tue Jan 21 13:14:51 2020 -0500
@@ -96,7 +96,7 @@
             return self.mtnrunsingle(*args, **kwargs)
 
     def mtnrunsingle(self, *args, **kwargs):
-        kwargs[r'd'] = self.path
+        kwargs['d'] = self.path
         return self.run0(b'automate', *args, **kwargs)
 
     def mtnrunstdio(self, *args, **kwargs):
@@ -239,7 +239,7 @@
         #   key "test@selenic.com"
         # mtn >= 0.45:
         #   key [ff58a7ffb771907c4ff68995eada1c4da068d328]
-        certlist = re.split(br'\n\n      key ["\[]', certlist)
+        certlist = re.split(br'\n\n {6}key ["\[]', certlist)
         for e in certlist:
             m = self.cert_re.match(e)
             if m:
--- a/hgext/convert/p4.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/convert/p4.py	Tue Jan 21 13:14:51 2020 -0500
@@ -24,7 +24,7 @@
 
 
 def loaditer(f):
-    b"Yield the dictionary objects generated by p4"
+    """Yield the dictionary objects generated by p4"""
     try:
         while True:
             d = marshal.load(f)
@@ -105,7 +105,7 @@
         self.revmap = revmap
 
     def _parse_view(self, path):
-        b"Read changes affecting the path"
+        """Read changes affecting the path"""
         cmd = b'p4 -G changes -s submitted %s' % procutil.shellquote(path)
         stdout = procutil.popen(cmd, mode=b'rb')
         p4changes = {}
@@ -116,7 +116,7 @@
         return p4changes
 
     def _parse(self, ui, path):
-        b"Prepare list of P4 filenames and revisions to import"
+        """Prepare list of P4 filenames and revisions to import"""
         p4changes = {}
         changeset = {}
         files_map = {}
--- a/hgext/convert/subversion.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/convert/subversion.py	Tue Jan 21 13:14:51 2020 -0500
@@ -643,7 +643,7 @@
         if not re.match(
             r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
             r'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
-            r'{12,12}(.*)\@[0-9]+$',
+            r'{12,12}(.*)@[0-9]+$',
             revstr,
         ):
             raise error.Abort(
@@ -1303,7 +1303,7 @@
             self.wc = os.path.realpath(path)
             self.run0(b'update')
         else:
-            if not re.search(br'^(file|http|https|svn|svn\+ssh)\://', path):
+            if not re.search(br'^(file|http|https|svn|svn\+ssh)://', path):
                 path = os.path.realpath(path)
                 if os.path.isdir(os.path.dirname(path)):
                     if not os.path.exists(
@@ -1359,11 +1359,11 @@
         m = set()
         output = self.run0(b'ls', recursive=True, xml=True)
         doc = xml.dom.minidom.parseString(output)
-        for e in doc.getElementsByTagName(r'entry'):
+        for e in doc.getElementsByTagName('entry'):
             for n in e.childNodes:
-                if n.nodeType != n.ELEMENT_NODE or n.tagName != r'name':
+                if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
                     continue
-                name = r''.join(
+                name = ''.join(
                     c.data for c in n.childNodes if c.nodeType == c.TEXT_NODE
                 )
                 # Entries are compared with names coming from
@@ -1502,7 +1502,7 @@
             self.setexec = []
 
         fd, messagefile = pycompat.mkstemp(prefix=b'hg-convert-')
-        fp = os.fdopen(fd, r'wb')
+        fp = os.fdopen(fd, 'wb')
         fp.write(util.tonativeeol(commit.desc))
         fp.close()
         try:
--- a/hgext/extdiff.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/extdiff.py	Tue Jan 21 13:14:51 2020 -0500
@@ -271,7 +271,7 @@
         path1a = os.path.join(tmproot, dir1a, commonfile)
         label1a = commonfile + rev1a
         if not os.path.isfile(path1a):
-            path1a = os.devnull
+            path1a = pycompat.osdevnull
 
         path1b = b''
         label1b = b''
@@ -279,7 +279,7 @@
             path1b = os.path.join(tmproot, dir1b, commonfile)
             label1b = commonfile + rev1b
             if not os.path.isfile(path1b):
-                path1b = os.devnull
+                path1b = pycompat.osdevnull
 
         path2 = os.path.join(dir2root, dir2, commonfile)
         label2 = commonfile + rev2
@@ -401,13 +401,14 @@
         if node2 is None:
             raise error.Abort(_(b'--patch requires two revisions'))
     else:
-        mod_a, add_a, rem_a = map(
-            set, repo.status(node1a, node2, matcher, listsubrepos=subrepos)[:3]
-        )
+        st = repo.status(node1a, node2, matcher, listsubrepos=subrepos)
+        mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed)
         if do3way:
-            mod_b, add_b, rem_b = map(
-                set,
-                repo.status(node1b, node2, matcher, listsubrepos=subrepos)[:3],
+            stb = repo.status(node1b, node2, matcher, listsubrepos=subrepos)
+            mod_b, add_b, rem_b = (
+                set(stb.modified),
+                set(stb.added),
+                set(stb.removed),
             )
         else:
             mod_b, add_b, rem_b = set(), set(), set()
@@ -467,12 +468,12 @@
                 dir1a = os.path.join(tmproot, dir1a, common_file)
                 label1a = common_file + rev1a
                 if not os.path.isfile(dir1a):
-                    dir1a = os.devnull
+                    dir1a = pycompat.osdevnull
                 if do3way:
                     dir1b = os.path.join(tmproot, dir1b, common_file)
                     label1b = common_file + rev1b
                     if not os.path.isfile(dir1b):
-                        dir1b = os.devnull
+                        dir1b = pycompat.osdevnull
                 dir2 = os.path.join(dir2root, dir2, common_file)
                 label2 = common_file + rev2
         else:
@@ -655,7 +656,7 @@
         # in an unknown encoding anyway), but avoid double separators on
         # Windows
         docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
-        self.__doc__ %= {r'path': pycompat.sysstr(stringutil.uirepr(docpath))}
+        self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))}
         self._cmdline = cmdline
         self._isgui = isgui
 
--- a/hgext/fastannotate/commands.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/fastannotate/commands.py	Tue Jan 21 13:14:51 2020 -0500
@@ -82,7 +82,7 @@
 
 
 fastannotatecommandargs = {
-    r'options': [
+    'options': [
         (b'r', b'rev', b'.', _(b'annotate the specified revision'), _(b'REV')),
         (b'u', b'user', None, _(b'list the author (long with -v)')),
         (b'f', b'file', None, _(b'list the filename')),
@@ -133,8 +133,8 @@
     + commands.diffwsopts
     + commands.walkopts
     + commands.formatteropts,
-    r'synopsis': _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
-    r'inferrepo': True,
+    'synopsis': _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
+    'inferrepo': True,
 }
 
 
@@ -257,7 +257,7 @@
 _newopts = set()
 _knownopts = {
     opt[1].replace(b'-', b'_')
-    for opt in (fastannotatecommandargs[r'options'] + commands.globalopts)
+    for opt in (fastannotatecommandargs['options'] + commands.globalopts)
 }
 
 
@@ -269,10 +269,10 @@
 
     # treat the file as text (skip the isbinary check)
     if ui.configbool(b'fastannotate', b'forcetext'):
-        opts[r'text'] = True
+        opts['text'] = True
 
     # check if we need to do prefetch (client-side)
-    rev = opts.get(r'rev')
+    rev = opts.get('rev')
     if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None:
         paths = list(_matchpaths(repo, rev, pats, pycompat.byteskwargs(opts)))
         repo.prefetchfastannotate(paths)
--- a/hgext/fastannotate/context.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/fastannotate/context.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,7 +9,6 @@
 
 import collections
 import contextlib
-import hashlib
 import os
 
 from mercurial.i18n import _
@@ -28,7 +27,10 @@
     scmutil,
     util,
 )
-from mercurial.utils import stringutil
+from mercurial.utils import (
+    hashutil,
+    stringutil,
+)
 
 from . import (
     error as faerror,
@@ -148,7 +150,7 @@
     diffoptstr = stringutil.pprint(
         sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults)
     )
-    return node.hex(hashlib.sha1(diffoptstr).digest())[:6]
+    return node.hex(hashutil.sha1(diffoptstr).digest())[:6]
 
 
 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
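
Direct hashlib.sha1 use moves to mercurial.utils.hashutil.sha1 here and in several files below. The wrapper's point is to prefer a collision-detecting SHA-1 implementation when one is bundled and fall back to hashlib otherwise; a sketch of that selection logic (an assumption about the module's internals, not a copy of it):

    import hashlib

    try:
        # optional collision-detecting SHA-1 (sha1dc), when bundled
        from mercurial.thirdparty import sha1dc
        sha1 = sha1dc.sha1
    except (ImportError, AttributeError):
        sha1 = hashlib.sha1

    digest = sha1(b'some data').hexdigest()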
--- a/hgext/fastannotate/support.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/fastannotate/support.py	Tue Jan 21 13:14:51 2020 -0500
@@ -74,7 +74,6 @@
     may raise Exception, and always return line numbers.
     """
     master = _getmaster(fctx)
-    annotated = contents = None
 
     with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
         try:
--- a/hgext/fix.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/fix.py	Tue Jan 21 13:14:51 2020 -0500
@@ -144,9 +144,9 @@
     match as matchmod,
     mdiff,
     merge,
-    obsolete,
     pycompat,
     registrar,
+    rewriteutil,
     scmutil,
     util,
     worker,
@@ -249,9 +249,8 @@
     override this default behavior, though it is not usually desirable to do so.
     """
     opts = pycompat.byteskwargs(opts)
+    cmdutil.check_at_most_one_arg(opts, b'all', b'rev')
     if opts[b'all']:
-        if opts[b'rev']:
-            raise error.Abort(_(b'cannot specify both "--rev" and "--all"'))
         opts[b'rev'] = [b'not public() and not obsolete()']
         opts[b'working_dir'] = True
     with repo.wlock(), repo.lock(), repo.transaction(b'fix'):
@@ -404,7 +403,7 @@
         checkfixablectx(ui, repo, repo[rev])
     if revs:
         cmdutil.checkunfinished(repo)
-        checknodescendants(repo, revs)
+        rewriteutil.precheck(repo, revs, b'fix')
     if opts.get(b'working_dir'):
         revs.add(wdirrev)
         if list(merge.mergestate.read(repo).unresolved()):
@@ -416,22 +415,8 @@
     return revs
 
 
-def checknodescendants(repo, revs):
-    if not obsolete.isenabled(repo, obsolete.allowunstableopt) and repo.revs(
-        b'(%ld::) - (%ld)', revs, revs
-    ):
-        raise error.Abort(
-            _(b'can only fix a changeset together with all its descendants')
-        )
-
-
 def checkfixablectx(ui, repo, ctx):
     """Aborts if the revision shouldn't be replaced with a fixed one."""
-    if not ctx.mutable():
-        raise error.Abort(
-            b'can\'t fix immutable changeset %s'
-            % (scmutil.formatchangeid(ctx),)
-        )
     if ctx.obsolete():
         # It would be better to actually check if the revision has a successor.
         allowdivergence = ui.configbool(
@@ -681,7 +666,7 @@
             if rev is None:
                 ui.warn(_(b'wdir'), label=b'evolve.rev')
             else:
-                ui.warn((str(rev)), label=b'evolve.rev')
+                ui.warn(b'%d' % rev, label=b'evolve.rev')
             ui.warn(b'] %s: %s\n' % (fixername, line))
 
 
@@ -745,36 +730,38 @@
     ):
         return
 
-    def filectxfn(repo, memctx, path):
-        if path not in ctx:
-            return None
-        fctx = ctx[path]
-        copysource = fctx.copysource()
-        return context.memfilectx(
-            repo,
-            memctx,
-            path=fctx.path(),
-            data=filedata.get(path, fctx.data()),
-            islink=fctx.islink(),
-            isexec=fctx.isexec(),
-            copysource=copysource,
-        )
-
     extra = ctx.extra().copy()
     extra[b'fix_source'] = ctx.hex()
 
-    memctx = context.memctx(
+    wctx = context.overlayworkingctx(repo)
+    wctx.setbase(repo[newp1node])
+    merge.update(
         repo,
-        parents=(newp1node, newp2node),
+        ctx.rev(),
+        branchmerge=False,
+        force=True,
+        ancestor=p1rev,
+        mergeancestor=False,
+        wc=wctx,
+    )
+    copies.graftcopies(wctx, ctx, ctx.p1())
+
+    for path in filedata.keys():
+        fctx = ctx[path]
+        copysource = fctx.copysource()
+        wctx.write(path, filedata[path], flags=fctx.flags())
+        if copysource:
+            wctx.markcopied(path, copysource)
+
+    memctx = wctx.tomemctx(
         text=ctx.description(),
-        files=set(ctx.files()) | set(filedata.keys()),
-        filectxfn=filectxfn,
-        user=ctx.user(),
+        branch=ctx.branch(),
+        extra=extra,
         date=ctx.date(),
-        extra=extra,
-        branch=ctx.branch(),
-        editor=None,
+        parents=(newp1node, newp2node),
+        user=ctx.user(),
     )
+
     sucnode = memctx.commit()
     prenode = ctx.node()
     if prenode == sucnode:
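
The deleted memctx/filectxfn construction synthesized the replacement commit through a callback producing one memfilectx per file. The new code instead materializes the revision in an in-memory overlayworkingctx grafted onto the new parent, regenerates copy metadata with copies.graftcopies(), overwrites only the fixed files, and converts the overlay into a memctx. The skeleton of that flow, stripped of fix-specific details:

    wctx = context.overlayworkingctx(repo)     # in-memory working copy
    wctx.setbase(repo[newp1node])              # graft onto the new parent
    merge.update(repo, ctx.rev(), branchmerge=False, force=True,
                 ancestor=p1rev, mergeancestor=False, wc=wctx)
    copies.graftcopies(wctx, ctx, ctx.p1())    # carry copy/rename records

    for path, data in filedata.items():        # overwrite fixed contents
        wctx.write(path, data, flags=ctx[path].flags())

    memctx = wctx.tomemctx(text=ctx.description(),
                           parents=(newp1node, newp2node))
    replacement = memctx.commit()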
--- a/hgext/fsmonitor/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/fsmonitor/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -108,7 +108,6 @@
 from __future__ import absolute_import
 
 import codecs
-import hashlib
 import os
 import stat
 import sys
@@ -132,7 +131,10 @@
     util,
 )
 from mercurial import match as matchmod
-from mercurial.utils import stringutil
+from mercurial.utils import (
+    hashutil,
+    stringutil,
+)
 
 from . import (
     pywatchman,
@@ -235,7 +237,7 @@
     copy.
 
     """
-    sha1 = hashlib.sha1()
+    sha1 = hashutil.sha1()
     sha1.update(pycompat.byterepr(ignore))
     return pycompat.sysbytes(sha1.hexdigest())
 
--- a/hgext/fsmonitor/pywatchman/capabilities.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/fsmonitor/pywatchman/capabilities.py	Tue Jan 21 13:14:51 2020 -0500
@@ -29,8 +29,6 @@
 # no unicode literals
 from __future__ import absolute_import, division, print_function
 
-import re
-
 
 def parse_version(vstr):
     res = 0
@@ -64,7 +62,7 @@
     vers["capabilities"] = {}
     for name in opts["optional"]:
         vers["capabilities"][name] = check(parsed_version, name)
-    failed = False  # noqa: F841 T25377293 Grandfathered in
+
     for name in opts["required"]:
         have = check(parsed_version, name)
         vers["capabilities"][name] = have
--- a/hgext/fsmonitor/pywatchman/pybser.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/fsmonitor/pywatchman/pybser.py	Tue Jan 21 13:14:51 2020 -0500
@@ -506,7 +506,6 @@
 
 
 def _pdu_info_helper(buf):
-    bser_version = -1
     if buf[0:2] == EMPTY_HEADER[0:2]:
         bser_version = 1
         bser_capabilities = 0
--- a/hgext/fsmonitor/watchmanclient.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/fsmonitor/watchmanclient.py	Tue Jan 21 13:14:51 2020 -0500
@@ -105,11 +105,11 @@
                 )
             return self._watchmanclient.query(*watchmanargs)
         except pywatchman.CommandError as ex:
-            if b'unable to resolve root' in ex.msg:
+            if 'unable to resolve root' in ex.msg:
                 raise WatchmanNoRoot(
                     self._root, stringutil.forcebytestr(ex.msg)
                 )
-            raise Unavailable(ex.msg)
+            raise Unavailable(stringutil.forcebytestr(ex.msg))
         except pywatchman.WatchmanError as ex:
             raise Unavailable(stringutil.forcebytestr(ex))
 
--- a/hgext/githelp.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/githelp.py	Tue Jan 21 13:14:51 2020 -0500
@@ -90,11 +90,11 @@
             args = fancyopts.fancyopts(list(args), cmdoptions, opts, True)
             break
         except getopt.GetoptError as ex:
-            if r"requires argument" in ex.msg:
+            if "requires argument" in ex.msg:
                 raise
-            if (r'--' + ex.opt) in ex.msg:
+            if ('--' + ex.opt) in ex.msg:
                 flag = b'--' + pycompat.bytestr(ex.opt)
-            elif (r'-' + ex.opt) in ex.msg:
+            elif ('-' + ex.opt) in ex.msg:
                 flag = b'-' + pycompat.bytestr(ex.opt)
             else:
                 raise error.Abort(
@@ -209,7 +209,7 @@
 
 def am(ui, repo, *args, **kwargs):
     cmdoptions = []
-    args, opts = parseoptions(ui, cmdoptions, args)
+    parseoptions(ui, cmdoptions, args)
     cmd = Command(b'import')
     ui.status(bytes(cmd), b"\n")
 
@@ -1139,7 +1139,7 @@
 
 def svndcommit(ui, repo, *args, **kwargs):
     cmdoptions = []
-    args, opts = parseoptions(ui, cmdoptions, args)
+    parseoptions(ui, cmdoptions, args)
 
     cmd = Command(b'push')
 
@@ -1148,7 +1148,7 @@
 
 def svnfetch(ui, repo, *args, **kwargs):
     cmdoptions = []
-    args, opts = parseoptions(ui, cmdoptions, args)
+    parseoptions(ui, cmdoptions, args)
 
     cmd = Command(b'pull')
     cmd.append(b'default-push')
@@ -1173,7 +1173,7 @@
     cmdoptions = [
         (b'l', b'local', None, b''),
     ]
-    args, opts = parseoptions(ui, cmdoptions, args)
+    parseoptions(ui, cmdoptions, args)
 
     pullcmd = Command(b'pull')
     pullcmd.append(b'default-push')
--- a/hgext/gpg.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/gpg.py	Tue Jan 21 13:14:51 2020 -0500
@@ -69,11 +69,11 @@
         try:
             # create temporary files
             fd, sigfile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".sig")
-            fp = os.fdopen(fd, r'wb')
+            fp = os.fdopen(fd, 'wb')
             fp.write(sig)
             fp.close()
             fd, datafile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".txt")
-            fp = os.fdopen(fd, r'wb')
+            fp = os.fdopen(fd, 'wb')
             fp.write(data)
             fp.close()
             gpgcmd = (
@@ -121,7 +121,7 @@
 def newgpg(ui, **opts):
     """create a new gpg instance"""
     gpgpath = ui.config(b"gpg", b"cmd")
-    gpgkey = opts.get(r'key')
+    gpgkey = opts.get('key')
     if not gpgkey:
         gpgkey = ui.config(b"gpg", b"key")
     return gpg(gpgpath, gpgkey)
--- a/hgext/graphlog.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/graphlog.py	Tue Jan 21 13:14:51 2020 -0500
@@ -121,5 +121,5 @@
 
     This is an alias to :hg:`log -G`.
     """
-    opts[r'graph'] = True
+    opts['graph'] = True
     return commands.log(ui, repo, *pats, **opts)
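
As in acl, blackbox, and churn above, the r'' prefix comes off because **opts keys are always native str on Python 3, so a bytes key could never match. When a byte-keyed dict is wanted for Mercurial's internals, the convention is an explicit conversion; a hedged sketch:

    from mercurial import pycompat

    def graphlog(ui, repo, *pats, **opts):
        opts['graph'] = True                   # native str key
        byteopts = pycompat.byteskwargs(opts)  # same data, b'graph' keys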
--- a/hgext/hgk.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/hgk.py	Tue Jan 21 13:14:51 2020 -0500
@@ -92,21 +92,21 @@
         mmap = repo[node1].manifest()
         mmap2 = repo[node2].manifest()
         m = scmutil.match(repo[node1], files)
-        modified, added, removed = repo.status(node1, node2, m)[:3]
+        st = repo.status(node1, node2, m)
         empty = short(nullid)
 
-        for f in modified:
+        for f in st.modified:
             # TODO get file permissions
             ui.writenoi18n(
                 b":100664 100664 %s %s M\t%s\t%s\n"
                 % (short(mmap[f]), short(mmap2[f]), f, f)
             )
-        for f in added:
+        for f in st.added:
             ui.writenoi18n(
                 b":000000 100664 %s %s N\t%s\t%s\n"
                 % (empty, short(mmap2[f]), f, f)
             )
-        for f in removed:
+        for f in st.removed:
             ui.writenoi18n(
                 b":100664 000000 %s %s D\t%s\t%s\n"
                 % (short(mmap[f]), empty, f, f)
@@ -115,7 +115,7 @@
     ##
 
     while True:
-        if opts[r'stdin']:
+        if opts['stdin']:
             line = ui.fin.readline()
             if not line:
                 break
@@ -131,8 +131,8 @@
         else:
             node2 = node1
             node1 = repo.changelog.parents(node1)[0]
-        if opts[r'patch']:
-            if opts[r'pretty']:
+        if opts['patch']:
+            if opts['pretty']:
                 catcommit(ui, repo, node2, b"")
             m = scmutil.match(repo[node1], files)
             diffopts = patch.difffeatureopts(ui)
@@ -142,7 +142,7 @@
                 ui.write(chunk)
         else:
             __difftree(repo, node1, node2, files=files)
-        if not opts[r'stdin']:
+        if not opts['stdin']:
             break
 
 
@@ -201,7 +201,7 @@
     # strings
     #
     prefix = b""
-    if opts[r'stdin']:
+    if opts['stdin']:
         line = ui.fin.readline()
         if not line:
             return
@@ -218,7 +218,7 @@
             return 1
         n = repo.lookup(r)
         catcommit(ui, repo, n, prefix)
-        if opts[r'stdin']:
+        if opts['stdin']:
             line = ui.fin.readline()
             if not line:
                 break
@@ -363,7 +363,7 @@
     else:
         full = None
     copy = [x for x in revs]
-    revtree(ui, copy, repo, full, opts[r'max_count'], opts[r'parents'])
+    revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
 
 
 @command(
@@ -373,7 +373,7 @@
     helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
 )
 def view(ui, repo, *etc, **opts):
-    b"start interactive history viewer"
+    """start interactive history viewer"""
     opts = pycompat.byteskwargs(opts)
     os.chdir(repo.root)
     optstr = b' '.join(
--- a/hgext/highlight/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/highlight/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -52,7 +52,7 @@
     filenameonly = web.configbool(b'web', b'highlightonlymatchfilename', False)
 
     ctx = fctx.changectx()
-    m = ctx.matchfileset(expr)
+    m = ctx.matchfileset(fctx.repo().root, expr)
     if m(fctx.path()):
         highlight.pygmentize(
             field, fctx, style, tmpl, guessfilenameonly=filenameonly
--- a/hgext/histedit.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/histedit.py	Tue Jan 21 13:14:51 2020 -0500
@@ -230,6 +230,7 @@
     pycompat,
     registrar,
     repair,
+    rewriteutil,
     scmutil,
     state as statemod,
     util,
@@ -307,7 +308,7 @@
         if len(a.verbs):
             v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))
         actions.append(b" %s = %s" % (v, lines[0]))
-        actions.extend([b'  %s' for l in lines[1:]])
+        actions.extend([b'  %s'] * (len(lines) - 1))
 
     for v in (
         sorted(primaryactions)
@@ -624,9 +625,9 @@
     def commitfunc(**kwargs):
         overrides = {(b'phases', b'new-commit'): phasemin}
         with repo.ui.configoverride(overrides, b'histedit'):
-            extra = kwargs.get(r'extra', {}).copy()
+            extra = kwargs.get('extra', {}).copy()
             extra[b'histedit_source'] = src.hex()
-            kwargs[r'extra'] = extra
+            kwargs['extra'] = extra
             return repo.commit(**kwargs)
 
     return commitfunc
@@ -1056,6 +1057,7 @@
 
 COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5
 COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8
+COLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11
 
 E_QUIT, E_HISTEDIT = 1, 2
 E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7
@@ -1119,32 +1121,42 @@
         self.conflicts = []
 
     def __bytes__(self):
-        # Some actions ('fold' and 'roll') combine a patch with a previous one.
-        # Add a marker showing which patch they apply to, and also omit the
-        # description for 'roll' (since it will get discarded). Example display:
+        # Example display of several histeditrules:
         #
         #  #10 pick   316392:06a16c25c053   add option to skip tests
-        #  #11 ^roll  316393:71313c964cc5
+        #  #11 ^roll  316393:71313c964cc5   <RED>oops a fixup commit</RED>
         #  #12 pick   316394:ab31f3973b0d   include mfbt for mozilla-config.h
         #  #13 ^fold  316395:14ce5803f4c3   fix warnings
         #
         # The carets point to the changeset being folded into ("roll this
         # changeset into the changeset above").
+        return b'%s%s' % (self.prefix, self.desc)
+
+    __str__ = encoding.strmethod(__bytes__)
+
+    @property
+    def prefix(self):
+        # Some actions ('fold' and 'roll') combine a patch with a
+        # previous one. Add a marker showing which patch they apply
+        # to.
         action = ACTION_LABELS.get(self.action, self.action)
+
         h = self.ctx.hex()[0:12]
         r = self.ctx.rev()
-        desc = self.ctx.description().splitlines()[0].strip()
-        if self.action == b'roll':
-            desc = b''
-        return b"#%s %s %d:%s   %s" % (
+
+        return b"#%s %s %d:%s   " % (
             (b'%d' % self.origpos).ljust(2),
             action.ljust(6),
             r,
             h,
-            desc,
         )
 
-    __str__ = encoding.strmethod(__bytes__)
+    @property
+    def desc(self):
+        # This is split off from the prefix property so that we can
+        # separately make the description for 'roll' red (since it
+        # will get discarded).
+        return self.ctx.description().splitlines()[0].strip()
 
     def checkconflicts(self, other):
         if other.pos > self.pos and other.origpos <= self.origpos:
@@ -1382,6 +1394,11 @@
     curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
     curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
     curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
+    curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
+    curses.init_pair(
+        COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
+    )
+    curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
 
     # don't display the cursor
     try:
@@ -1483,9 +1500,12 @@
                 rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
             else:
                 rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)
+
             if y + start == selected:
+                rollcolor = COLOR_ROLL_SELECTED
                 addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
             elif y + start == pos:
+                rollcolor = COLOR_ROLL_CURRENT
                 addln(
                     rulesscr,
                     y,
@@ -1494,7 +1514,17 @@
                     curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,
                 )
             else:
+                rollcolor = COLOR_ROLL
                 addln(rulesscr, y, 2, rule)
+
+            if rule.action == b'roll':
+                rulesscr.addstr(
+                    y,
+                    2 + len(rule.prefix),
+                    rule.desc,
+                    curses.color_pair(rollcolor),
+                )
+
         rulesscr.noutrefresh()
 
     def renderstring(win, state, output, diffcolors=False):
@@ -1674,7 +1704,7 @@
         # Curses requires setting the locale or it will default to the C
         # locale. This sets the locale to the user's default system
         # locale.
-        locale.setlocale(locale.LC_ALL, r'')
+        locale.setlocale(locale.LC_ALL, '')
         rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs))
         curses.echo()
         curses.endwin()
@@ -2046,11 +2076,11 @@
             mapping[n] = ()
 
     # remove entries about unknown nodes
-    nodemap = repo.unfiltered().changelog.nodemap
+    has_node = repo.unfiltered().changelog.index.has_node
     mapping = {
         k: v
         for k, v in mapping.items()
-        if k in nodemap and all(n in nodemap for n in v)
+        if has_node(k) and all(has_node(n) for n in v)
     }
     scmutil.cleanupnodes(repo, mapping, b'histedit')
     hf = fm.hexfunc
@@ -2277,23 +2307,9 @@
     When keep is false, the specified set can't have children."""
     revs = repo.revs(b'%n::%n', old, new)
     if revs and not keep:
-        if not obsolete.isenabled(
-            repo, obsolete.allowunstableopt
-        ) and repo.revs(b'(%ld::) - (%ld)', revs, revs):
-            raise error.Abort(
-                _(
-                    b'can only histedit a changeset together '
-                    b'with all its descendants'
-                )
-            )
+        rewriteutil.precheck(repo, revs, b'edit')
         if repo.revs(b'(%ld) and merge()', revs):
             raise error.Abort(_(b'cannot edit history that contains merges'))
-        root = repo[revs.first()]  # list is already sorted by repo.revs()
-        if not root.mutable():
-            raise error.Abort(
-                _(b'cannot edit public changeset: %s') % root,
-                hint=_(b"see 'hg help phases' for details"),
-            )
     return pycompat.maplist(repo.changelog.node, revs)
 
 
@@ -2447,7 +2463,7 @@
         return oldreplacements
 
     unfi = repo.unfiltered()
-    nm = unfi.changelog.nodemap
+    get_rev = unfi.changelog.index.get_rev
     obsstore = repo.obsstore
     newreplacements = list(oldreplacements)
     oldsuccs = [r[1] for r in oldreplacements]
@@ -2458,7 +2474,7 @@
     succstocheck = list(seensuccs)
     while succstocheck:
         n = succstocheck.pop()
-        missing = nm.get(n) is None
+        missing = get_rev(n) is None
         markers = obsstore.successors.get(n, ())
         if missing and not markers:
             # dead end, mark it as such
@@ -2517,9 +2533,9 @@
         del final[n]
     # we expect all changes involved in final to exist in the repo
     # turn `final` into list (topologically sorted)
-    nm = state.repo.changelog.nodemap
+    get_rev = state.repo.changelog.index.get_rev
     for prec, succs in final.items():
-        final[prec] = sorted(succs, key=nm.get)
+        final[prec] = sorted(succs, key=get_rev)
 
     # computed topmost element (necessary for bookmark)
     if new:
@@ -2565,8 +2581,8 @@
         repo = repo.unfiltered()
         # Find all nodes that need to be stripped
         # (we use %lr instead of %ln to silently ignore unknown items)
-        nm = repo.changelog.nodemap
-        nodes = sorted(n for n in nodes if n in nm)
+        has_node = repo.changelog.index.has_node
+        nodes = sorted(n for n in nodes if has_node(n))
         roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
         if roots:
             backup = not nobackup
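
Several call sites in this file migrate from the deprecated changelog.nodemap mapping to the index methods has_node() and get_rev(), which the C and Rust index implementations both provide. The equivalences, as used in the hunks above:

    cl = repo.unfiltered().changelog

    # membership test:  n in cl.nodemap    ->  cl.index.has_node(n)
    known = sorted(n for n in nodes if cl.index.has_node(n))

    # revision lookup:  cl.nodemap.get(n)  ->  cl.index.get_rev(n)
    ordered = sorted(known, key=cl.index.get_rev)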
--- a/hgext/infinitepush/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/infinitepush/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -195,7 +195,7 @@
 revsetpredicate = registrar.revsetpredicate()
 templatekeyword = registrar.templatekeyword()
 _scratchbranchmatcher = lambda x: False
-_maybehash = re.compile(r'^[a-f0-9]+$').search
+_maybehash = re.compile('^[a-f0-9]+$').search
 
 
 def _buildexternalbundlestore(ui):
@@ -548,7 +548,7 @@
     allbundlestocleanup = []
     try:
         for head in heads:
-            if head not in repo.changelog.nodemap:
+            if not repo.changelog.index.has_node(head):
                 if head not in nodestobundle:
                     newbundlefile = common.downloadbundle(repo, head)
                     bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
@@ -1031,7 +1031,7 @@
     fd, bundlefile = pycompat.mkstemp()
     try:
         try:
-            fp = os.fdopen(fd, r'wb')
+            fp = os.fdopen(fd, 'wb')
             fp.write(buf.read())
         finally:
             fp.close()
@@ -1122,7 +1122,7 @@
         fd, bundlefile = pycompat.mkstemp()
         try:
             try:
-                fp = os.fdopen(fd, r'wb')
+                fp = os.fdopen(fd, 'wb')
                 fp.write(buf.read())
             finally:
                 fp.close()
@@ -1254,7 +1254,7 @@
     fd, bundlefile = pycompat.mkstemp()
     try:
         try:
-            fp = os.fdopen(fd, r'wb')
+            fp = os.fdopen(fd, 'wb')
             fp.write(buf.read())
         finally:
             fp.close()
--- a/hgext/infinitepush/common.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/infinitepush/common.py	Tue Jan 21 13:14:51 2020 -0500
@@ -37,7 +37,7 @@
     fd, bundlefile = pycompat.mkstemp()
     try:  # guards bundlefile
         try:  # guards fp
-            fp = os.fdopen(fd, r'wb')
+            fp = os.fdopen(fd, 'wb')
             fp.write(data)
         finally:
             fp.close()
--- a/hgext/infinitepush/store.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/infinitepush/store.py	Tue Jan 21 13:14:51 2020 -0500
@@ -6,7 +6,6 @@
 from __future__ import absolute_import
 
 import abc
-import hashlib
 import os
 import subprocess
 import tempfile
@@ -16,7 +15,10 @@
     node,
     pycompat,
 )
-from mercurial.utils import procutil
+from mercurial.utils import (
+    hashutil,
+    procutil,
+)
 
 NamedTemporaryFile = tempfile.NamedTemporaryFile
 
@@ -29,7 +31,7 @@
     pass
 
 
-class abstractbundlestore(object):
+class abstractbundlestore(object):  # pytype: disable=ignored-metaclass
     """Defines the interface for bundle stores.
 
     A bundle store is an entity that stores raw bundle data. It is a simple
@@ -87,7 +89,7 @@
         return os.path.join(self._dirpath(filename), filename)
 
     def write(self, data):
-        filename = node.hex(hashlib.sha1(data).digest())
+        filename = node.hex(hashutil.sha1(data).digest())
         dirpath = self._dirpath(filename)
 
         if not os.path.exists(dirpath):
--- a/hgext/journal.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/journal.py	Tue Jan 21 13:14:51 2020 -0500
@@ -149,7 +149,7 @@
 
     Note that by default entries go from most recent to oldest.
     """
-    order = kwargs.pop(r'order', max)
+    order = kwargs.pop('order', max)
     iterables = [iter(it) for it in iterables]
     # this tracks still active iterables; iterables are deleted as they are
     # exhausted, which is why this is a dictionary and why each entry also
@@ -214,8 +214,8 @@
 
 class journalentry(
     collections.namedtuple(
-        r'journalentry',
-        r'timestamp user command namespace name oldhashes newhashes',
+        'journalentry',
+        'timestamp user command namespace name oldhashes newhashes',
     )
 ):
     """Individual journal entry
--- a/hgext/keyword.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/keyword.py	Tue Jan 21 13:14:51 2020 -0500
@@ -482,16 +482,16 @@
     ui.setconfig(b'keywordset', b'svn', svn, b'keyword')
 
     uikwmaps = ui.configitems(b'keywordmaps')
-    if args or opts.get(r'rcfile'):
+    if args or opts.get('rcfile'):
         ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
         if uikwmaps:
             ui.status(_(b'\textending current template maps\n'))
-        if opts.get(r'default') or not uikwmaps:
+        if opts.get('default') or not uikwmaps:
             if svn:
                 ui.status(_(b'\toverriding default svn keywordset\n'))
             else:
                 ui.status(_(b'\toverriding default cvs keywordset\n'))
-        if opts.get(r'rcfile'):
+        if opts.get('rcfile'):
             ui.readconfig(opts.get(b'rcfile'))
         if args:
             # simulate hgrc parsing
@@ -499,7 +499,7 @@
             repo.vfs.write(b'hgrc', rcmaps)
             ui.readconfig(repo.vfs.join(b'hgrc'))
         kwmaps = dict(ui.configitems(b'keywordmaps'))
-    elif opts.get(r'default'):
+    elif opts.get('default'):
         if svn:
             ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
         else:
--- a/hgext/largefiles/lfcommands.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/largefiles/lfcommands.py	Tue Jan 21 13:14:51 2020 -0500
@@ -10,7 +10,6 @@
 from __future__ import absolute_import
 
 import errno
-import hashlib
 import os
 import shutil
 
@@ -29,6 +28,7 @@
     scmutil,
     util,
 )
+from mercurial.utils import hashutil
 
 from ..convert import (
     convcmd,
@@ -273,7 +273,7 @@
                         )
 
                 # largefile was modified, update standins
-                m = hashlib.sha1(b'')
+                m = hashutil.sha1(b'')
                 m.update(ctx[f].data())
                 hash = node.hex(m.digest())
                 if f not in lfiletohash or lfiletohash[f] != hash:
@@ -648,7 +648,7 @@
     """
     repo.lfpullsource = source
 
-    revs = opts.get(r'rev', [])
+    revs = opts.get('rev', [])
     if not revs:
         raise error.Abort(_(b'no revisions specified'))
     revs = scmutil.revrange(repo, revs)
--- a/hgext/largefiles/lfutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/largefiles/lfutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,8 +9,8 @@
 '''largefiles utility code: must not import other modules in this package.'''
 from __future__ import absolute_import
 
+import contextlib
 import copy
-import hashlib
 import os
 import stat
 
@@ -31,6 +31,7 @@
     util,
     vfs as vfsmod,
 )
+from mercurial.utils import hashutil
 
 shortname = b'.hglf'
 shortnameslash = shortname + b'/'
@@ -39,6 +40,16 @@
 # -- Private worker functions ------------------------------------------
 
 
+@contextlib.contextmanager
+def lfstatus(repo, value=True):
+    oldvalue = getattr(repo, 'lfstatus', False)
+    repo.lfstatus = value
+    try:
+        yield
+    finally:
+        repo.lfstatus = oldvalue
+
+
 def getminsize(ui, assumelfiles, opt, default=10):
     lfsize = opt
     if not lfsize and assumelfiles:
@@ -421,7 +432,7 @@
 def copyandhash(instream, outfile):
     '''Read bytes from instream (iterable) and write them to outfile,
     computing the SHA-1 hash of the data along the way. Return the hash.'''
-    hasher = hashlib.sha1(b'')
+    hasher = hashutil.sha1(b'')
     for data in instream:
         hasher.update(data)
         outfile.write(data)
@@ -461,7 +472,7 @@
 def hexsha1(fileobj):
     """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
     object data"""
-    h = hashlib.sha1()
+    h = hashutil.sha1()
     for chunk in util.filechunkiter(fileobj):
         h.update(chunk)
     return hex(h.digest())
@@ -580,12 +591,8 @@
             progress.update(i)
             parents = [p for p in repo[n].parents() if p != node.nullid]
 
-            oldlfstatus = repo.lfstatus
-            repo.lfstatus = False
-            try:
+            with lfstatus(repo, value=False):
                 ctx = repo[n]
-            finally:
-                repo.lfstatus = oldlfstatus
 
             files = set(ctx.files())
             if len(parents) == 2:
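
The new lfstatus context manager replaces the repeated save/set/restore pattern
around repo.lfstatus; the finally clause guarantees the old value is restored
even if the body raises. Callers shrink to a with block, as the later hunks in
this file and in overrides.py show:

    # toggle repo.lfstatus for the duration of a block
    with lfstatus(repo):              # sets repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)

    with lfstatus(repo, value=False):
        ctx = repo[n]                 # largefiles reporting disabled here
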
--- a/hgext/largefiles/overrides.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/largefiles/overrides.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,7 +9,6 @@
 '''Overridden Mercurial commands and functions for the largefiles extension'''
 from __future__ import absolute_import
 
-import contextlib
 import copy
 import os
 
@@ -50,6 +49,8 @@
 
 eh = exthelper.exthelper()
 
+lfstatus = lfutil.lfstatus
+
 # -- Utility functions: commonly/repeatedly needed functionality ---------------
 
 
@@ -84,9 +85,9 @@
 
 
 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
-    large = opts.get(r'large')
+    large = opts.get('large')
     lfsize = lfutil.getminsize(
-        ui, lfutil.islfilesrepo(repo), opts.get(r'lfsize')
+        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
     )
 
     lfmatcher = None
@@ -131,7 +132,7 @@
     # Need to lock, otherwise there could be a race condition between
     # when standins are created and added to the repo.
     with repo.wlock():
-        if not opts.get(r'dry_run'):
+        if not opts.get('dry_run'):
             standins = []
             lfdirstate = lfutil.openlfdirstate(ui, repo)
             for f in lfnames:
@@ -158,18 +159,8 @@
     return added, bad
 
 
-@contextlib.contextmanager
-def lfstatus(repo):
-    oldvalue = getattr(repo, 'lfstatus', False)
-    repo.lfstatus = True
-    try:
-        yield
-    finally:
-        repo.lfstatus = oldvalue
-
-
 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
-    after = opts.get(r'after')
+    after = opts.get('after')
     m = composelargefilematcher(matcher, repo[None].manifest())
     with lfstatus(repo):
         s = repo.status(match=m, clean=not isaddremove)
@@ -269,7 +260,7 @@
     ],
 )
 def overrideadd(orig, ui, repo, *pats, **opts):
-    if opts.get(r'normal') and opts.get(r'large'):
+    if opts.get('normal') and opts.get('large'):
         raise error.Abort(_(b'--normal cannot be used with --large'))
     return orig(ui, repo, *pats, **opts)
 
@@ -277,7 +268,7 @@
 @eh.wrapfunction(cmdutil, b'add')
 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
     # The --normal flag short circuits this override
-    if opts.get(r'normal'):
+    if opts.get('normal'):
         return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
 
     ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
@@ -477,9 +468,9 @@
     ],
 )
 def overrideverify(orig, ui, repo, *pats, **opts):
-    large = opts.pop(r'large', False)
-    all = opts.pop(r'lfa', False)
-    contents = opts.pop(r'lfc', False)
+    large = opts.pop('large', False)
+    all = opts.pop('lfa', False)
+    contents = opts.pop('lfc', False)
 
     result = orig(ui, repo, *pats, **opts)
     if large or all or contents:
@@ -492,7 +483,7 @@
     opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
 )
 def overridedebugstate(orig, ui, repo, *pats, **opts):
-    large = opts.pop(r'large', False)
+    large = opts.pop('large', False)
     if large:
 
         class fakerepo(object):
@@ -975,8 +966,8 @@
     repo.lfpullsource = source
     result = orig(ui, repo, source, **opts)
     revspostpull = len(repo)
-    lfrevs = opts.get(r'lfrev', [])
-    if opts.get(r'all_largefiles'):
+    lfrevs = opts.get('lfrev', [])
+    if opts.get('all_largefiles'):
         lfrevs.append(b'pulled()')
     if lfrevs and revspostpull > revsprepull:
         numcached = 0
@@ -1006,9 +997,9 @@
 )
 def overridepush(orig, ui, repo, *args, **kwargs):
     """Override push command and store --lfrev parameters in opargs"""
-    lfrevs = kwargs.pop(r'lfrev', None)
+    lfrevs = kwargs.pop('lfrev', None)
     if lfrevs:
-        opargs = kwargs.setdefault(r'opargs', {})
+        opargs = kwargs.setdefault('opargs', {})
         opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
     return orig(ui, repo, *args, **kwargs)
 
@@ -1016,7 +1007,7 @@
 @eh.wrapfunction(exchange, b'pushoperation')
 def exchangepushoperation(orig, *args, **kwargs):
     """Override pushoperation constructor and store lfrevs parameter"""
-    lfrevs = kwargs.pop(r'lfrevs', None)
+    lfrevs = kwargs.pop('lfrevs', None)
     pushop = orig(*args, **kwargs)
     pushop.lfrevs = lfrevs
     return pushop
@@ -1064,7 +1055,7 @@
     d = dest
     if d is None:
         d = hg.defaultdest(source)
-    if opts.get(r'all_largefiles') and not hg.islocal(d):
+    if opts.get('all_largefiles') and not hg.islocal(d):
         raise error.Abort(
             _(b'--all-largefiles is incompatible with non-local destination %s')
             % d
@@ -1104,7 +1095,7 @@
     if not util.safehasattr(repo, b'_largefilesenabled'):
         return orig(ui, repo, **opts)
 
-    resuming = opts.get(r'continue')
+    resuming = opts.get('continue')
     repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
     repo._lfstatuswriters.append(lambda *msg, **opts: None)
     try:
@@ -1613,7 +1604,7 @@
 
 @eh.wrapcommand(b'transplant', extension=b'transplant')
 def overridetransplant(orig, ui, repo, *revs, **opts):
-    resuming = opts.get(r'continue')
+    resuming = opts.get('continue')
     repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
     repo._lfstatuswriters.append(lambda *msg, **opts: None)
     try:
@@ -1698,7 +1689,7 @@
 
 @eh.wrapfunction(merge, b'update')
 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
-    matcher = kwargs.get(r'matcher', None)
+    matcher = kwargs.get('matcher', None)
     # note if this is a partial update
     partial = matcher and not matcher.always()
     with repo.wlock():
@@ -1758,7 +1749,7 @@
         # Make sure the merge runs on disk, not in-memory. largefiles is not a
         # good candidate for in-memory merge (large files, custom dirstate,
         # matcher usage).
-        kwargs[r'wc'] = repo[None]
+        kwargs['wc'] = repo[None]
         result = orig(repo, node, branchmerge, force, *args, **kwargs)
 
         newstandins = lfutil.getstandinsstate(repo)
--- a/hgext/largefiles/proto.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/largefiles/proto.py	Tue Jan 21 13:14:51 2020 -0500
@@ -116,7 +116,7 @@
                     b'putlfile',
                     data=fd,
                     sha=sha,
-                    headers={r'content-type': r'application/mercurial-0.1'},
+                    headers={'content-type': 'application/mercurial-0.1'},
                 )
                 try:
                     d, output = res.split(b'\n', 1)
@@ -206,7 +206,7 @@
     if cmd == b'heads' and self.capable(b'largefiles'):
         cmd = b'lheads'
     if cmd == b'batch' and self.capable(b'largefiles'):
-        args[r'cmds'] = args[r'cmds'].replace(b'heads ', b'lheads ')
+        args['cmds'] = args['cmds'].replace(b'heads ', b'lheads ')
     return ssholdcallstream(self, cmd, **args)
 
 
@@ -217,5 +217,5 @@
     if cmd == b'heads' and self.capable(b'largefiles'):
         cmd = b'lheads'
     if cmd == b'batch' and self.capable(b'largefiles'):
-        args[r'cmds'] = headsre.sub(b'lheads', args[r'cmds'])
+        args['cmds'] = headsre.sub(b'lheads', args['cmds'])
     return httpoldcallstream(self, cmd, **args)
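
Note that these 'cmds' keys stay native str rather than becoming b'cmds' like
most Mercurial literals: args is splatted into the call (**args), and Python 3
only accepts str keyword names. In isolation:

    def call(**kwargs):
        return kwargs

    call(**{'cmds': b'heads'})   # fine
    call(**{b'cmds': b'heads'})  # TypeError on Python 3: keywords must be strings
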
--- a/hgext/largefiles/reposetup.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/largefiles/reposetup.py	Tue Jan 21 13:14:51 2020 -0500
@@ -15,9 +15,11 @@
 
 from mercurial import (
     error,
+    extensions,
     localrepo,
     match as matchmod,
     scmutil,
+    util,
 )
 
 from . import (
@@ -38,9 +40,6 @@
 
         lfstatus = False
 
-        def status_nolfiles(self, *args, **kwargs):
-            return super(lfilesrepo, self).status(*args, **kwargs)
-
         # When lfstatus is set, return a context that gives the names
         # of largefiles instead of their corresponding standins and
         # identifies the largefiles as always binary, regardless of
@@ -49,45 +48,46 @@
             ctx = super(lfilesrepo, self).__getitem__(changeid)
             if self.lfstatus:
 
-                class lfilesctx(ctx.__class__):
-                    def files(self):
-                        filenames = super(lfilesctx, self).files()
-                        return [lfutil.splitstandin(f) or f for f in filenames]
+                def files(orig):
+                    filenames = orig()
+                    return [lfutil.splitstandin(f) or f for f in filenames]
 
-                    def manifest(self):
-                        man1 = super(lfilesctx, self).manifest()
+                extensions.wrapfunction(ctx, 'files', files)
+
+                def manifest(orig):
+                    man1 = orig()
 
-                        class lfilesmanifest(man1.__class__):
-                            def __contains__(self, filename):
-                                orig = super(lfilesmanifest, self).__contains__
-                                return orig(filename) or orig(
-                                    lfutil.standin(filename)
-                                )
+                    class lfilesmanifest(man1.__class__):
+                        def __contains__(self, filename):
+                            orig = super(lfilesmanifest, self).__contains__
+                            return orig(filename) or orig(
+                                lfutil.standin(filename)
+                            )
 
-                        man1.__class__ = lfilesmanifest
-                        return man1
+                    man1.__class__ = lfilesmanifest
+                    return man1
 
-                    def filectx(self, path, fileid=None, filelog=None):
-                        orig = super(lfilesctx, self).filectx
-                        try:
-                            if filelog is not None:
-                                result = orig(path, fileid, filelog)
-                            else:
-                                result = orig(path, fileid)
-                        except error.LookupError:
-                            # Adding a null character will cause Mercurial to
-                            # identify this as a binary file.
-                            if filelog is not None:
-                                result = orig(
-                                    lfutil.standin(path), fileid, filelog
-                                )
-                            else:
-                                result = orig(lfutil.standin(path), fileid)
-                            olddata = result.data
-                            result.data = lambda: olddata() + b'\0'
-                        return result
+                extensions.wrapfunction(ctx, 'manifest', manifest)
 
-                ctx.__class__ = lfilesctx
+                def filectx(orig, path, fileid=None, filelog=None):
+                    try:
+                        if filelog is not None:
+                            result = orig(path, fileid, filelog)
+                        else:
+                            result = orig(path, fileid)
+                    except error.LookupError:
+                        # Adding a null character will cause Mercurial to
+                        # identify this as a binary file.
+                        if filelog is not None:
+                            result = orig(lfutil.standin(path), fileid, filelog)
+                        else:
+                            result = orig(lfutil.standin(path), fileid)
+                        olddata = result.data
+                        result.data = lambda: olddata() + b'\0'
+                    return result
+
+                extensions.wrapfunction(ctx, 'filectx', filectx)
+
             return ctx
 
         # Figure out the status of big files and insert them into the
@@ -130,14 +130,15 @@
             if match is None:
                 match = matchmod.always()
 
-            wlock = None
             try:
-                try:
-                    # updating the dirstate is optional
-                    # so we don't wait on the lock
-                    wlock = self.wlock(False)
-                except error.LockError:
-                    pass
+                # updating the dirstate is optional
+                # so we don't wait on the lock
+                wlock = self.wlock(False)
+                gotlock = True
+            except error.LockError:
+                wlock = util.nullcontextmanager()
+                gotlock = False
+            with wlock:
 
                 # First check if paths or patterns were specified on the
                 # command line.  If there were, and they don't match any
@@ -308,13 +309,9 @@
                         for items in result
                     ]
 
-                if wlock:
+                if gotlock:
                     lfdirstate.write()
 
-            finally:
-                if wlock:
-                    wlock.release()
-
             self.lfstatus = True
             return scmutil.status(*result)
 
@@ -360,20 +357,6 @@
                 )
                 return result
 
-        def push(self, remote, force=False, revs=None, newbranch=False):
-            if remote.local():
-                missing = set(self.requirements) - remote.local().supported
-                if missing:
-                    msg = _(
-                        b"required features are not"
-                        b" supported in the destination:"
-                        b" %s"
-                    ) % (b', '.join(sorted(missing)))
-                    raise error.Abort(msg)
-            return super(lfilesrepo, self).push(
-                remote, force=force, revs=revs, newbranch=newbranch
-            )
-
         # TODO: _subdirlfs should be moved into "lfutil.py", because
         # it is referred only from "lfutil.updatestandinsbymatch"
         def _subdirlfs(self, files, lfiles):
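
The __getitem__ rewrite stops minting a fresh lfilesctx subclass (and
reassigning ctx.__class__) on every lookup; instead each returned context gets
its files/manifest/filectx methods wrapped in place. The mechanism, reduced to a
sketch (not Mercurial's actual wrapfunction implementation):

    # per-object method wrapping in the style of extensions.wrapfunction:
    # the wrapper receives the original bound method as its first argument
    def wrapfunction(container, funcname, wrapper):
        orig = getattr(container, funcname)

        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)

        setattr(container, funcname, wrapped)
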
--- a/hgext/lfs/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/lfs/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -241,12 +241,12 @@
             if b'lfs' in repo.requirements:
                 return 0
 
-            last = kwargs.get(r'node_last')
+            last = kwargs.get('node_last')
             _bin = node.bin
             if last:
-                s = repo.set(b'%n:%n', _bin(kwargs[r'node']), _bin(last))
+                s = repo.set(b'%n:%n', _bin(kwargs['node']), _bin(last))
             else:
-                s = repo.set(b'%n', _bin(kwargs[r'node']))
+                s = repo.set(b'%n', _bin(kwargs['node']))
             match = repo._storenarrowmatch
             for ctx in s:
                 # TODO: is there a way to just walk the files in the commit?
@@ -399,6 +399,28 @@
 )
 def debuglfsupload(ui, repo, **opts):
     """upload lfs blobs added by the working copy parent or given revisions"""
-    revs = opts.get(r'rev', [])
+    revs = opts.get('rev', [])
     pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
     wrapper.uploadblobs(repo, pointers)
+
+
+@eh.wrapcommand(
+    b'verify',
+    opts=[(b'', b'no-lfs', None, _(b'skip missing lfs blob content'))],
+)
+def verify(orig, ui, repo, **opts):
+    skipflags = repo.ui.configint(b'verify', b'skipflags')
+    no_lfs = opts.pop('no_lfs')
+
+    if skipflags:
+        # --lfs overrides the config bit, if set.
+        if no_lfs is False:
+            skipflags &= ~repository.REVISION_FLAG_EXTSTORED
+    else:
+        skipflags = 0
+
+    if no_lfs is True:
+        skipflags |= repository.REVISION_FLAG_EXTSTORED
+
+    with ui.configoverride({(b'verify', b'skipflags'): skipflags}):
+        return orig(ui, repo, **opts)
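
The verify wrapper treats verify.skipflags as a bitmask: passing --lfs (the
generated negation of --no-lfs) clears the REVISION_FLAG_EXTSTORED bit so lfs
revisions are verified despite the config, while --no-lfs sets it to skip blob
content. The bit arithmetic in isolation (the flag value is illustrative, not
the real constant):

    EXTSTORED = 1 << 13          # stand-in for REVISION_FLAG_EXTSTORED
    skipflags = EXTSTORED        # suppose the config already skips lfs
    skipflags &= ~EXTSTORED      # --lfs: clear the bit, verify anyway
    skipflags |= EXTSTORED       # --no-lfs: set the bit, skip blob content
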
--- a/hgext/lfs/blobstore.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/lfs/blobstore.py	Tue Jan 21 13:14:51 2020 -0500
@@ -155,15 +155,29 @@
 
         return self.vfs(oid, b'rb')
 
-    def download(self, oid, src):
+    def download(self, oid, src, content_length):
         """Read the blob from the remote source in chunks, verify the content,
         and write to this local blobstore."""
         sha256 = hashlib.sha256()
+        size = 0
 
         with self.vfs(oid, b'wb', atomictemp=True) as fp:
             for chunk in util.filechunkiter(src, size=1048576):
                 fp.write(chunk)
                 sha256.update(chunk)
+                size += len(chunk)
+
+            # If the server advertised a length longer than what we actually
+            # received, then we should expect that the server crashed while
+            # producing the response (but the server has no way of telling us
+            # that), and we really don't need to try to write the response to
+            # the localstore, because it's not going to match the expected.
+            if content_length is not None and int(content_length) != size:
+                msg = (
+                    b"Response length (%s) does not match Content-Length "
+                    b"header (%d): likely server-side crash"
+                )
+                raise LfsRemoteError(_(msg) % (size, int(content_length)))
 
             realoid = node.hex(sha256.digest())
             if realoid != oid:
@@ -280,11 +294,11 @@
         """Enforces that any authentication performed is HTTP Basic
         Authentication.  No authentication is also acceptable.
         """
-        authreq = headers.get(r'www-authenticate', None)
+        authreq = headers.get('www-authenticate', None)
         if authreq:
             scheme = authreq.split()[0]
 
-            if scheme.lower() != r'basic':
+            if scheme.lower() != 'basic':
                 msg = _(b'the server must support Basic Authentication')
                 raise util.urlerr.httperror(
                     req.get_full_url(),
@@ -324,18 +338,18 @@
         See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
         """
         objects = [
-            {r'oid': pycompat.strurl(p.oid()), r'size': p.size()}
+            {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
             for p in pointers
         ]
         requestdata = pycompat.bytesurl(
             json.dumps(
-                {r'objects': objects, r'operation': pycompat.strurl(action),}
+                {'objects': objects, 'operation': pycompat.strurl(action),}
             )
         )
         url = b'%s/objects/batch' % self.baseurl
         batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
-        batchreq.add_header(r'Accept', r'application/vnd.git-lfs+json')
-        batchreq.add_header(r'Content-Type', r'application/vnd.git-lfs+json')
+        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
+        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
         try:
             with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
                 rawjson = rsp.read()
@@ -376,9 +390,9 @@
             headers = pycompat.bytestr(rsp.info()).strip()
             self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
 
-            if r'objects' in response:
-                response[r'objects'] = sorted(
-                    response[r'objects'], key=lambda p: p[r'oid']
+            if 'objects' in response:
+                response['objects'] = sorted(
+                    response['objects'], key=lambda p: p['oid']
                 )
             self.ui.debug(
                 b'%s\n'
@@ -386,7 +400,7 @@
                     json.dumps(
                         response,
                         indent=2,
-                        separators=(r'', r': '),
+                        separators=('', ': '),
                         sort_keys=True,
                     )
                 )
@@ -483,33 +497,36 @@
                 )
             request.data = filewithprogress(localstore.open(oid), None)
             request.get_method = lambda: r'PUT'
-            request.add_header(r'Content-Type', r'application/octet-stream')
-            request.add_header(r'Content-Length', len(request.data))
+            request.add_header('Content-Type', 'application/octet-stream')
+            request.add_header('Content-Length', len(request.data))
 
         for k, v in headers:
             request.add_header(pycompat.strurl(k), pycompat.strurl(v))
 
-        response = b''
         try:
-            with contextlib.closing(self.urlopener.open(request)) as req:
+            with contextlib.closing(self.urlopener.open(request)) as res:
+                contentlength = res.info().get(b"content-length")
                 ui = self.ui  # Shorten debug lines
                 if self.ui.debugflag:
-                    ui.debug(b'Status: %d\n' % req.status)
+                    ui.debug(b'Status: %d\n' % res.status)
                     # lfs-test-server and hg serve return headers in different
                     # order
-                    headers = pycompat.bytestr(req.info()).strip()
+                    headers = pycompat.bytestr(res.info()).strip()
                     ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
 
                 if action == b'download':
                     # If downloading blobs, store downloaded data to local
                     # blobstore
-                    localstore.download(oid, req)
+                    localstore.download(oid, res, contentlength)
                 else:
+                    blocks = []
                     while True:
-                        data = req.read(1048576)
+                        data = res.read(1048576)
                         if not data:
                             break
-                        response += data
+                        blocks.append(data)
+
+                    response = b"".join(blocks)
                     if response:
                         ui.debug(b'lfs %s response: %s' % (action, response))
         except util.urlerr.httperror as ex:
@@ -588,7 +605,9 @@
         else:
             oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
 
-        with self.ui.makeprogress(topic, total=total) as progress:
+        with self.ui.makeprogress(
+            topic, unit=_(b"bytes"), total=total
+        ) as progress:
             progress.update(0)
             processed = 0
             blobs = 0
@@ -635,7 +654,7 @@
     def readbatch(self, pointers, tostore):
         for p in _deduplicate(pointers):
             with self.vfs(p.oid(), b'rb') as fp:
-                tostore.download(p.oid(), fp)
+                tostore.download(p.oid(), fp, None)
 
 
 class _nullremote(object):
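
download() now threads the Content-Length header through so a truncated
response (typically the server dying mid-stream) is reported as a likely
server-side crash, instead of the partial blob being committed and surfacing
later as a bare hash mismatch. The pattern, as a standalone sketch:

    import hashlib

    def checked_copy(src, dst, expected_len, expected_oid):
        # stream in chunks, counting bytes and hashing as we go
        h, size = hashlib.sha256(), 0
        for chunk in iter(lambda: src.read(1048576), b''):
            dst.write(chunk)
            h.update(chunk)
            size += len(chunk)
        if expected_len is not None and int(expected_len) != size:
            raise IOError('got %d bytes, Content-Length said %d'
                          % (size, int(expected_len)))
        if h.hexdigest() != expected_oid:
            raise IOError('hash mismatch')
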
--- a/hgext/lfs/wireprotolfsserver.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/lfs/wireprotolfsserver.py	Tue Jan 21 13:14:51 2020 -0500
@@ -136,7 +136,7 @@
     lfsreq = pycompat.json_loads(req.bodyfh.read())
 
     # If no transfer handlers are explicitly requested, 'basic' is assumed.
-    if r'basic' not in lfsreq.get(r'transfers', [r'basic']):
+    if 'basic' not in lfsreq.get('transfers', ['basic']):
         _sethttperror(
             res,
             HTTP_BAD_REQUEST,
@@ -144,7 +144,7 @@
         )
         return True
 
-    operation = lfsreq.get(r'operation')
+    operation = lfsreq.get('operation')
     operation = pycompat.bytestr(operation)
 
     if operation not in (b'upload', b'download'):
@@ -160,13 +160,13 @@
     objects = [
         p
         for p in _batchresponseobjects(
-            req, lfsreq.get(r'objects', []), operation, localstore
+            req, lfsreq.get('objects', []), operation, localstore
         )
     ]
 
     rsp = {
-        r'transfer': r'basic',
-        r'objects': objects,
+        'transfer': 'basic',
+        'objects': objects,
     }
 
     res.status = hgwebcommon.statusmessage(HTTP_OK)
@@ -206,12 +206,12 @@
 
     for obj in objects:
         # Convert unicode to ASCII to create a filesystem path
-        soid = obj.get(r'oid')
-        oid = soid.encode(r'ascii')
+        soid = obj.get('oid')
+        oid = soid.encode('ascii')
         rsp = {
-            r'oid': soid,
-            r'size': obj.get(r'size'),  # XXX: should this check the local size?
-            # r'authenticated': True,
+            'oid': soid,
+            'size': obj.get('size'),  # XXX: should this check the local size?
+            # 'authenticated': True,
         }
 
         exists = True
@@ -234,9 +234,9 @@
             if inst.errno != errno.ENOENT:
                 _logexception(req)
 
-                rsp[r'error'] = {
-                    r'code': 500,
-                    r'message': inst.strerror or r'Internal Server Server',
+                rsp['error'] = {
+                    'code': 500,
+                    'message': inst.strerror or 'Internal Server Error',
                 }
                 yield rsp
                 continue
@@ -247,17 +247,17 @@
         # IFF they already exist locally.
         if action == b'download':
             if not exists:
-                rsp[r'error'] = {
-                    r'code': 404,
-                    r'message': r"The object does not exist",
+                rsp['error'] = {
+                    'code': 404,
+                    'message': "The object does not exist",
                 }
                 yield rsp
                 continue
 
             elif not verifies:
-                rsp[r'error'] = {
-                    r'code': 422,  # XXX: is this the right code?
-                    r'message': r"The object is corrupt",
+                rsp['error'] = {
+                    'code': 422,  # XXX: is this the right code?
+                    'message': "The object is corrupt",
                 }
                 yield rsp
                 continue
@@ -272,23 +272,23 @@
             # The spec doesn't mention the Accept header here, but avoid
             # a gratuitous deviation from lfs-test-server in the test
             # output.
-            hdr = {r'Accept': r'application/vnd.git-lfs'}
+            hdr = {'Accept': 'application/vnd.git-lfs'}
 
             auth = req.headers.get(b'Authorization', b'')
             if auth.startswith(b'Basic '):
-                hdr[r'Authorization'] = pycompat.strurl(auth)
+                hdr['Authorization'] = pycompat.strurl(auth)
 
             return hdr
 
-        rsp[r'actions'] = {
-            r'%s'
+        rsp['actions'] = {
+            '%s'
             % pycompat.strurl(action): {
-                r'href': pycompat.strurl(
+                'href': pycompat.strurl(
                     b'%s%s/.hg/lfs/objects/%s' % (req.baseurl, req.apppath, oid)
                 ),
                 # datetime.isoformat() doesn't include the 'Z' suffix
-                r"expires_at": expiresat.strftime(r'%Y-%m-%dT%H:%M:%SZ'),
-                r'header': _buildheader(),
+                "expires_at": expiresat.strftime('%Y-%m-%dT%H:%M:%SZ'),
+                'header': _buildheader(),
             }
         }
 
@@ -327,7 +327,7 @@
 
         statusmessage = hgwebcommon.statusmessage
         try:
-            localstore.download(oid, req.bodyfh)
+            localstore.download(oid, req.bodyfh, req.headers[b'Content-Length'])
             res.status = statusmessage(HTTP_OK if existed else HTTP_CREATED)
         except blobstore.LfsCorruptionError:
             _logexception(req)
--- a/hgext/lfs/wrapper.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/lfs/wrapper.py	Tue Jan 21 13:14:51 2020 -0500
@@ -151,12 +151,12 @@
         if node is None:
             # both None - likely working copy content where node is not ready
             return False
-        rev = rlog._revlog.rev(node)
+        rev = rlog.rev(node)
     else:
-        node = rlog._revlog.node(rev)
+        node = rlog.node(rev)
     if node == nullid:
         return False
-    flags = rlog._revlog.flags(rev)
+    flags = rlog.flags(rev)
     return bool(flags & revlog.REVIDX_EXTSTORED)
 
 
@@ -203,7 +203,7 @@
 
 # Wrapping may also be applied by remotefilelog
 def filelogrenamed(orig, self, node):
-    if _islfs(self, node):
+    if _islfs(self._revlog, node):
         rawtext = self._revlog.rawdata(node)
         if not rawtext:
             return False
@@ -217,7 +217,7 @@
 
 # Wrapping may also be applied by remotefilelog
 def filelogsize(orig, self, rev):
-    if _islfs(self, rev=rev):
+    if _islfs(self._revlog, rev=rev):
         # fast path: use lfs metadata to answer size
         rawtext = self._revlog.rawdata(rev)
         metadata = pointer.deserialize(rawtext)
@@ -225,6 +225,25 @@
     return orig(self, rev)
 
 
+@eh.wrapfunction(revlog, b'_verify_revision')
+def _verify_revision(orig, rl, skipflags, state, node):
+    if _islfs(rl, node=node):
+        rawtext = rl.rawdata(node)
+        metadata = pointer.deserialize(rawtext)
+
+        # Don't skip blobs that are stored locally, as local verification is
+        # relatively cheap and there's no other way to verify the raw data in
+        # the revlog.
+        if rl.opener.lfslocalblobstore.has(metadata.oid()):
+            skipflags &= ~revlog.REVIDX_EXTSTORED
+        elif skipflags & revlog.REVIDX_EXTSTORED:
+            # The wrapped method will set `skipread`, but there's enough local
+            # info to check renames.
+            state[b'safe_renamed'].add(node)
+
+    orig(rl, skipflags, state, node)
+
+
 @eh.wrapfunction(context.basefilectx, b'cmp')
 def filectxcmp(orig, self, fctx):
     """returns True if text is different than fctx"""
@@ -248,7 +267,7 @@
 
 
 def filectxislfs(self):
-    return _islfs(self.filelog(), self.filenode())
+    return _islfs(self.filelog()._revlog, self.filenode())
 
 
 @eh.wrapfunction(cmdutil, b'_updatecatformatter')
@@ -459,7 +478,7 @@
         else:
             return None
     fctx = _ctx[f]
-    if not _islfs(fctx.filelog(), fctx.filenode()):
+    if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
         return None
     try:
         p = pointer.deserialize(fctx.rawdata())
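
_islfs now takes the revlog itself (callers pass fctx.filelog()._revlog),
matching the new _verify_revision hook, which is handed a revlog directly. That
hook lets `hg verify` skip fetching absent lfs blob content while still fully
verifying blobs already in the local store. The flag test all of this rests on,
condensed:

    from mercurial import revlog

    def islfs(rl, rev):
        # true when the revision's payload lives in the external lfs store
        return bool(rl.flags(rev) & revlog.REVIDX_EXTSTORED)
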
--- a/hgext/mq.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/mq.py	Tue Jan 21 13:14:51 2020 -0500
@@ -68,6 +68,7 @@
 import os
 import re
 import shutil
+import sys
 from mercurial.i18n import _
 from mercurial.node import (
     bin,
@@ -490,7 +491,7 @@
     def __init__(self, ui, baseui, path, patchdir=None):
         self.basepath = path
         try:
-            with open(os.path.join(path, b'patches.queue'), r'rb') as fh:
+            with open(os.path.join(path, b'patches.queue'), 'rb') as fh:
                 cur = fh.read().rstrip()
 
             if not cur:
@@ -1251,16 +1252,19 @@
         return None, None
 
     def putsubstate2changes(self, substatestate, changes):
-        for files in changes[:3]:
-            if b'.hgsubstate' in files:
-                return  # already listed up
+        if isinstance(changes, list):
+            mar = changes[:3]
+        else:
+            mar = (changes.modified, changes.added, changes.removed)
+        if any((b'.hgsubstate' in files for files in mar)):
+            return  # already listed up
         # not yet listed up
         if substatestate in b'a?':
-            changes[1].append(b'.hgsubstate')
+            mar[1].append(b'.hgsubstate')
         elif substatestate in b'r':
-            changes[2].append(b'.hgsubstate')
+            mar[2].append(b'.hgsubstate')
         else:  # modified
-            changes[0].append(b'.hgsubstate')
+            mar[0].append(b'.hgsubstate')
 
     def checklocalchanges(self, repo, force=False, refresh=True):
         excsuffix = b''
@@ -1377,8 +1381,9 @@
         else:
             changes = self.checklocalchanges(repo, force=True)
         commitfiles = list(inclsubs)
-        for files in changes[:3]:
-            commitfiles.extend(files)
+        commitfiles.extend(changes.modified)
+        commitfiles.extend(changes.added)
+        commitfiles.extend(changes.removed)
         match = scmutil.matchfiles(repo, commitfiles)
         if len(repo[None].parents()) > 1:
             raise error.Abort(_(b'cannot manage merge changesets'))
@@ -1818,7 +1823,8 @@
             if update:
                 qp = self.qparents(repo, rev)
                 ctx = repo[qp]
-                m, a, r, d = repo.status(qp, b'.')[:4]
+                st = repo.status(qp, b'.')
+                m, a, r, d = st.modified, st.added, st.removed, st.deleted
                 if d:
                     raise error.Abort(_(b"deletions found between repo revs"))
 
@@ -1910,10 +1916,11 @@
             # and then commit.
             #
             # this should really read:
-            #   mm, dd, aa = repo.status(top, patchparent)[:3]
+            #   st = repo.status(top, patchparent)
             # but we do it backwards to take advantage of manifest/changelog
             # caching against the next repo.status call
-            mm, aa, dd = repo.status(patchparent, top)[:3]
+            st = repo.status(patchparent, top)
+            mm, aa, dd = st.modified, st.added, st.removed
             ctx = repo[top]
             aaa = aa[:]
             match1 = scmutil.match(repo[None], pats, opts)
@@ -1927,7 +1934,8 @@
                 match1 = scmutil.match(repo[None], opts=opts)
             else:
                 match = scmutil.matchall(repo)
-            m, a, r, d = repo.status(match=match)[:4]
+            stb = repo.status(match=match)
+            m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted
             mm = set(mm)
             aa = set(aa)
             dd = set(dd)
@@ -1966,7 +1974,8 @@
 
             # create 'match' that includes the files to be recommitted.
             # apply match1 via repo.status to ensure correct case handling.
-            cm, ca, cr, cd = repo.status(patchparent, match=match1)[:4]
+            st = repo.status(patchparent, match=match1)
+            cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted
             allmatches = set(cm + ca + cr + cd)
             refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
 
@@ -2248,7 +2257,6 @@
     def restore(self, repo, rev, delete=None, qupdate=None):
         desc = repo[rev].description().strip()
         lines = desc.splitlines()
-        i = 0
         datastart = None
         series = []
         applied = []
@@ -2777,7 +2785,7 @@
 
     This command is deprecated. Without -c, it's implied by other relevant
     commands. With -c, use :hg:`init --mq` instead."""
-    return qinit(ui, repo, create=opts.get(r'create_repo'))
+    return qinit(ui, repo, create=opts.get('create_repo'))
 
 
 @command(
@@ -2933,7 +2941,7 @@
 
     Returns 0 on success."""
     repo.mq.qseries(
-        repo, missing=opts.get(r'missing'), summary=opts.get(r'summary')
+        repo, missing=opts.get('missing'), summary=opts.get('summary')
     )
     return 0
 
@@ -2960,7 +2968,7 @@
             start=t - 1,
             length=1,
             status=b'A',
-            summary=opts.get(r'summary'),
+            summary=opts.get('summary'),
         )
     else:
         ui.write(_(b"no patches applied\n"))
@@ -2982,7 +2990,7 @@
     if end == len(q.series):
         ui.write(_(b"all patches applied\n"))
         return 1
-    q.qseries(repo, start=end, length=1, summary=opts.get(r'summary'))
+    q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
 
 
 @command(
@@ -3005,7 +3013,7 @@
         return 1
     idx = q.series.index(q.applied[-2].name)
     q.qseries(
-        repo, start=idx, length=1, status=b'A', summary=opts.get(r'summary')
+        repo, start=idx, length=1, status=b'A', summary=opts.get('summary')
     )
 
 
@@ -3356,8 +3364,8 @@
     applied = set(p.name for p in q.applied)
     patch = None
     args = list(args)
-    if opts.get(r'list'):
-        if args or opts.get(r'none'):
+    if opts.get('list'):
+        if args or opts.get('none'):
             raise error.Abort(
                 _(b'cannot mix -l/--list with options or arguments')
             )
@@ -3372,7 +3380,7 @@
         patch = args.pop(0)
     if patch is None:
         raise error.Abort(_(b'no patch to work with'))
-    if args or opts.get(r'none'):
+    if args or opts.get('none'):
         idx = q.findseries(patch)
         if idx is None:
             raise error.Abort(_(b'no patch named %s') % patch)
@@ -3634,9 +3642,7 @@
     This command is deprecated, use :hg:`rebase` instead."""
     rev = repo.lookup(rev)
     q = repo.mq
-    q.restore(
-        repo, rev, delete=opts.get(r'delete'), qupdate=opts.get(r'update')
-    )
+    q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update'))
     q.savedirty()
     return 0
 
@@ -3841,9 +3847,9 @@
 
     Returns 0 on success.
     """
-    if not opts.get(r'applied') and not revrange:
+    if not opts.get('applied') and not revrange:
         raise error.Abort(_(b'no revisions specified'))
-    elif opts.get(r'applied'):
+    elif opts.get('applied'):
         revrange = (b'qbase::qtip',) + revrange
 
     q = repo.mq
@@ -4072,9 +4078,9 @@
 
         def invalidateall(self):
             super(mqrepo, self).invalidateall()
-            if localrepo.hasunfilteredcache(self, r'mq'):
+            if localrepo.hasunfilteredcache(self, 'mq'):
                 # recreate mq in case queue path was changed
-                delattr(self.unfiltered(), r'mq')
+                delattr(self.unfiltered(), 'mq')
 
         def abortifwdirpatched(self, errmsg, force=False):
             if self.mq.applied and self.mq.checkapplied and not force:
@@ -4172,16 +4178,16 @@
 
 def mqimport(orig, ui, repo, *args, **kwargs):
     if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
-        r'no_commit', False
+        'no_commit', False
     ):
         repo.abortifwdirpatched(
-            _(b'cannot import over an applied patch'), kwargs.get(r'force')
+            _(b'cannot import over an applied patch'), kwargs.get('force')
         )
     return orig(ui, repo, *args, **kwargs)
 
 
 def mqinit(orig, ui, *args, **kwargs):
-    mq = kwargs.pop(r'mq', None)
+    mq = kwargs.pop('mq', None)
 
     if not mq:
         return orig(ui, *args, **kwargs)
@@ -4206,7 +4212,7 @@
     """Add --mq option to operate on patch repository instead of main"""
 
     # some commands do not like getting unknown options
-    mq = kwargs.pop(r'mq', None)
+    mq = kwargs.pop('mq', None)
 
     if not mq:
         return orig(ui, repo, *args, **kwargs)
@@ -4272,8 +4278,9 @@
 
     dotable(commands.table)
 
+    thismodule = sys.modules["hgext.mq"]
     for extname, extmodule in extensions.extensions():
-        if extmodule.__file__ != __file__:
+        if extmodule != thismodule:
             dotable(getattr(extmodule, 'cmdtable', {}))
 
 
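The cmdtable walk above now compares module objects against sys.modules instead
of comparing __file__ paths; path comparison can disagree for the same module
(source vs. compiled path, or no usable __file__ at all in a frozen binary),
while identity against sys.modules is exact. In miniature:

    import sys

    def is_self(extmodule):
        # identity, not path, decides whether this is mq itself
        return extmodule is sys.modules['hgext.mq']
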
--- a/hgext/narrow/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/narrow/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,12 +8,6 @@
 
 from __future__ import absolute_import
 
-# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
-# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
-# be specifying the version(s) of Mercurial they are tested with, or
-# leave the attribute unspecified.
-testedwith = b'ships-with-hg-core'
-
 from mercurial import (
     localrepo,
     registrar,
@@ -29,6 +23,12 @@
     narrowwirepeer,
 )
 
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = b'ships-with-hg-core'
+
 configtable = {}
 configitem = registrar.configitem(configtable)
 # Narrowhg *has* support for serving ellipsis nodes (which are used at
--- a/hgext/narrow/narrowbundle2.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/narrow/narrowbundle2.py	Tue Jan 21 13:14:51 2020 -0500
@@ -62,8 +62,8 @@
         raise ValueError(_(b'no common changegroup version'))
     version = max(cgversions)
 
-    include = sorted(filter(bool, kwargs.get(r'includepats', [])))
-    exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
+    include = sorted(filter(bool, kwargs.get('includepats', [])))
+    exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
     generateellipsesbundle2(
         bundler,
         repo,
@@ -72,7 +72,7 @@
         version,
         common,
         heads,
-        kwargs.get(r'depth', None),
+        kwargs.get('depth', None),
     )
 
 
@@ -316,7 +316,7 @@
         if repo.ui.has_section(_NARROWACL_SECTION):
             kwargs = exchange.applynarrowacl(repo, kwargs)
 
-        if kwargs.get(r'narrow', False) and repo.ui.configbool(
+        if kwargs.get('narrow', False) and repo.ui.configbool(
             b'experimental', b'narrowservebrokenellipses'
         ):
             getbundlechangegrouppart_narrow(*args, **kwargs)
--- a/hgext/narrow/narrowcommands.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/narrow/narrowcommands.py	Tue Jan 21 13:14:51 2020 -0500
@@ -22,6 +22,7 @@
     hg,
     narrowspec,
     node,
+    pathutil,
     pycompat,
     registrar,
     repair,
@@ -136,8 +137,8 @@
 
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
-            if opts.get(r'depth'):
-                kwargs[b'depth'] = opts[r'depth']
+            if opts.get('depth'):
+                kwargs[b'depth'] = opts['depth']
 
         wrappedextraprepare = extensions.wrappedfunction(
             exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
@@ -151,15 +152,15 @@
     """Wraps archive command to narrow the default includes."""
     if repository.NARROW_REQUIREMENT in repo.requirements:
         repo_includes, repo_excludes = repo.narrowpats
-        includes = set(opts.get(r'include', []))
-        excludes = set(opts.get(r'exclude', []))
+        includes = set(opts.get('include', []))
+        excludes = set(opts.get('exclude', []))
         includes, excludes, unused_invalid = narrowspec.restrictpatterns(
             includes, excludes, repo_includes, repo_excludes
         )
         if includes:
-            opts[r'include'] = includes
+            opts['include'] = includes
         if excludes:
-            opts[r'exclude'] = excludes
+            opts['exclude'] = excludes
     return orig(ui, repo, *args, **opts)
 
 
@@ -277,7 +278,7 @@
                     todelete.append(f)
             elif f.startswith(b'meta/'):
                 dir = f[5:-13]
-                dirs = sorted(util.dirs({dir})) + [dir]
+                dirs = sorted(pathutil.dirs({dir})) + [dir]
                 include = True
                 for d in dirs:
                     visit = newmatch.visitdir(d)
--- a/hgext/narrow/narrowwirepeer.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/narrow/narrowwirepeer.py	Tue Jan 21 13:14:51 2020 -0500
@@ -33,8 +33,8 @@
                 # TODO: don't blindly add include/exclude wireproto
                 # arguments to unbundle.
                 include, exclude = repo.narrowpats
-                kwargs[r"includepats"] = b','.join(include)
-                kwargs[r"excludepats"] = b','.join(exclude)
+                kwargs["includepats"] = b','.join(include)
+                kwargs["excludepats"] = b','.join(exclude)
             return orig(cmd, *args, **kwargs)
 
         extensions.wrapfunction(peer, b'_calltwowaystream', wrapped)
@@ -139,12 +139,12 @@
 
 
 def peernarrowwiden(remote, **kwargs):
-    for ch in (r'commonheads', r'known'):
+    for ch in ('commonheads', 'known'):
         kwargs[ch] = wireprototypes.encodelist(kwargs[ch])
 
-    for ch in (r'oldincludes', r'newincludes', r'oldexcludes', r'newexcludes'):
+    for ch in ('oldincludes', 'newincludes', 'oldexcludes', 'newexcludes'):
         kwargs[ch] = b','.join(kwargs[ch])
 
-    kwargs[r'ellipses'] = b'%i' % bool(kwargs[r'ellipses'])
+    kwargs['ellipses'] = b'%i' % bool(kwargs['ellipses'])
     f = remote._callcompressable(b'narrow_widen', **kwargs)
     return bundle2.getunbundler(remote.ui, f)
--- a/hgext/notify.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/notify.py	Tue Jan 21 13:14:51 2020 -0500
@@ -388,13 +388,13 @@
             raise error.Abort(inst)
 
         # store sender and subject
-        sender = msg[r'From']
-        subject = msg[r'Subject']
+        sender = msg['From']
+        subject = msg['Subject']
         if sender is not None:
             sender = mail.headdecode(sender)
         if subject is not None:
             subject = mail.headdecode(subject)
-        del msg[r'From'], msg[r'Subject']
+        del msg['From'], msg['Subject']
 
         if not msg.is_multipart():
             # create fresh mime message from scratch
@@ -407,7 +407,7 @@
             for k, v in headers:
                 msg[k] = v
 
-        msg[r'Date'] = encoding.strfromlocal(
+        msg['Date'] = encoding.strfromlocal(
             dateutil.datestr(format=b"%a, %d %b %Y %H:%M:%S %1%2")
         )
 
@@ -421,8 +421,8 @@
         maxsubject = int(self.ui.config(b'notify', b'maxsubject'))
         if maxsubject:
             subject = stringutil.ellipsis(subject, maxsubject)
-        msg[r'Subject'] = encoding.strfromlocal(
-            mail.headencode(self.ui, subject, self.charsets, self.test)
+        msg['Subject'] = mail.headencode(
+            self.ui, subject, self.charsets, self.test
         )
 
         # try to make message have proper sender
@@ -430,14 +430,14 @@
             sender = self.ui.config(b'email', b'from') or self.ui.username()
         if b'@' not in sender or b'@localhost' in sender:
             sender = self.fixmail(sender)
-        msg[r'From'] = encoding.strfromlocal(
-            mail.addressencode(self.ui, sender, self.charsets, self.test)
+        msg['From'] = mail.addressencode(
+            self.ui, sender, self.charsets, self.test
         )
 
-        msg[r'X-Hg-Notification'] = r'changeset %s' % ctx
-        if not msg[r'Message-Id']:
-            msg[r'Message-Id'] = messageid(ctx, self.domain, self.messageidseed)
-        msg[r'To'] = encoding.strfromlocal(b', '.join(sorted(subs)))
+        msg['X-Hg-Notification'] = 'changeset %s' % ctx
+        if not msg['Message-Id']:
+            msg['Message-Id'] = messageid(ctx, self.domain, self.messageidseed)
+        msg['To'] = ', '.join(sorted(subs))
 
         msgtext = msg.as_bytes() if pycompat.ispy3 else msg.as_string()
         if self.test:
@@ -451,7 +451,7 @@
             )
             mail.sendmail(
                 self.ui,
-                emailutils.parseaddr(msg[r'From'])[1],
+                emailutils.parseaddr(msg['From'])[1],
                 subs,
                 msgtext,
                 mbox=self.mbox,
--- a/hgext/patchbomb.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/patchbomb.py	Tue Jan 21 13:14:51 2020 -0500
@@ -285,7 +285,7 @@
         if body:
             msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(b'test')))
         p = mail.mimetextpatch(
-            b'\n'.join(patchlines), b'x-patch', opts.get(b'test')
+            b'\n'.join(patchlines), 'x-patch', opts.get(b'test')
         )
         binnode = nodemod.bin(node)
         # if node is mq patch, it will have the patch file's name as a tag
@@ -306,8 +306,8 @@
         disposition = r'inline'
         if opts.get(b'attach'):
             disposition = r'attachment'
-        p[r'Content-Disposition'] = (
-            disposition + r'; filename=' + encoding.strfromlocal(patchname)
+        p['Content-Disposition'] = (
+            disposition + '; filename=' + encoding.strfromlocal(patchname)
         )
         msg.attach(p)
     else:
@@ -321,10 +321,10 @@
         subj = b' '.join([prefix, opts.get(b'subject') or subj])
     else:
         subj = b' '.join([prefix, subj])
-    msg[b'Subject'] = mail.headencode(ui, subj, _charsets, opts.get(b'test'))
-    msg[b'X-Mercurial-Node'] = node
-    msg[b'X-Mercurial-Series-Index'] = b'%i' % idx
-    msg[b'X-Mercurial-Series-Total'] = b'%i' % total
+    msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get(b'test'))
+    msg['X-Mercurial-Node'] = pycompat.sysstr(node)
+    msg['X-Mercurial-Series-Index'] = '%i' % idx
+    msg['X-Mercurial-Series-Total'] = '%i' % total
     return msg, subj, ds
 
 
@@ -358,7 +358,7 @@
     tmpfn = os.path.join(tmpdir, b'bundle')
     btype = ui.config(b'patchbomb', b'bundletype')
     if btype:
-        opts[r'type'] = btype
+        opts['type'] = btype
     try:
         commands.bundle(ui, repo, tmpfn, dest, **opts)
         return util.readfile(tmpfn)
@@ -379,8 +379,8 @@
     the user through the editor.
     """
     ui = repo.ui
-    if opts.get(r'desc'):
-        body = open(opts.get(r'desc')).read()
+    if opts.get('desc'):
+        body = open(opts.get('desc')).read()
     else:
         ui.write(
             _(b'\nWrite the introductory message for the patch series.\n\n')
@@ -403,25 +403,25 @@
     """
     ui = repo.ui
     _charsets = mail._charsets(ui)
-    subj = opts.get(r'subject') or prompt(
+    subj = opts.get('subject') or prompt(
         ui, b'Subject:', b'A bundle for your repository'
     )
 
     body = _getdescription(repo, b'', sender, **opts)
     msg = emimemultipart.MIMEMultipart()
     if body:
-        msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(r'test')))
-    datapart = emimebase.MIMEBase(r'application', r'x-mercurial-bundle')
+        msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
+    datapart = emimebase.MIMEBase('application', 'x-mercurial-bundle')
     datapart.set_payload(bundle)
-    bundlename = b'%s.hg' % opts.get(r'bundlename', b'bundle')
+    bundlename = b'%s.hg' % opts.get('bundlename', b'bundle')
     datapart.add_header(
-        r'Content-Disposition',
-        r'attachment',
+        'Content-Disposition',
+        'attachment',
         filename=encoding.strfromlocal(bundlename),
     )
     emailencoders.encode_base64(datapart)
     msg.attach(datapart)
-    msg[b'Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test'))
+    msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
     return [(msg, subj, None)]
 
 
@@ -434,9 +434,9 @@
 
     # use the last revision which is likely to be a bookmarked head
     prefix = _formatprefix(
-        ui, repo, revs.last(), opts.get(r'flag'), 0, len(patches), numbered=True
+        ui, repo, revs.last(), opts.get('flag'), 0, len(patches), numbered=True
     )
-    subj = opts.get(r'subject') or prompt(
+    subj = opts.get('subject') or prompt(
         ui, b'(optional) Subject: ', rest=prefix, default=b''
     )
     if not subj:
@@ -445,7 +445,7 @@
     subj = prefix + b' ' + subj
 
     body = b''
-    if opts.get(r'diffstat'):
+    if opts.get('diffstat'):
         # generate a cumulative diffstat of the whole patch series
         diffstat = patch.diffstat(sum(patches, []))
         body = b'\n' + diffstat
@@ -453,8 +453,8 @@
         diffstat = None
 
     body = _getdescription(repo, body, sender, **opts)
-    msg = mail.mimeencode(ui, body, _charsets, opts.get(r'test'))
-    msg[b'Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test'))
+    msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
+    msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
     return (msg, subj, diffstat)
 
 
@@ -522,9 +522,11 @@
 
 
 def _msgid(node, timestamp):
-    hostname = encoding.strtolocal(socket.getfqdn())
-    hostname = encoding.environ.get(b'HGHOSTNAME', hostname)
-    return b'<%s.%d@%s>' % (node, timestamp, hostname)
+    try:
+        hostname = encoding.strfromlocal(encoding.environ[b'HGHOSTNAME'])
+    except KeyError:
+        hostname = socket.getfqdn()
+    return '<%s.%d@%s>' % (node, timestamp, hostname)
 
 
 emailopts = [
@@ -765,8 +767,7 @@
                 b" do not re-specify --outgoing"
             )
         )
-    if rev and bookmark:
-        raise error.Abort(_(b"-r and -B are mutually exclusive"))
+    cmdutil.check_at_most_one_arg(opts, b'rev', b'bookmark')
 
     if outgoing or bundle:
         if len(revs) > 1:
@@ -847,7 +848,7 @@
         stropts = pycompat.strkwargs(opts)
         bundledata = _getbundle(repo, dest, **stropts)
         bundleopts = stropts.copy()
-        bundleopts.pop(r'bundle', None)  # already processed
+        bundleopts.pop('bundle', None)  # already processed
         msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
     else:
         msgs = _getpatchmsgs(repo, sender, revs, **pycompat.strkwargs(opts))
@@ -912,10 +913,11 @@
     parent = opts.get(b'in_reply_to') or None
     # angle brackets may be omitted, they're not semantically part of the msg-id
     if parent is not None:
-        if not parent.startswith(b'<'):
-            parent = b'<' + parent
-        if not parent.endswith(b'>'):
-            parent += b'>'
+        parent = encoding.strfromlocal(parent)
+        if not parent.startswith('<'):
+            parent = '<' + parent
+        if not parent.endswith('>'):
+            parent += '>'
 
     sender_addr = eutil.parseaddr(encoding.strfromlocal(sender))[1]
     sender = mail.addressencode(ui, sender, _charsets, opts.get(b'test'))
@@ -926,56 +928,36 @@
     )
     for i, (m, subj, ds) in enumerate(msgs):
         try:
-            m[b'Message-Id'] = genmsgid(m[b'X-Mercurial-Node'])
+            m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
             if not firstpatch:
-                firstpatch = m[b'Message-Id']
-            m[b'X-Mercurial-Series-Id'] = firstpatch
+                firstpatch = m['Message-Id']
+            m['X-Mercurial-Series-Id'] = firstpatch
         except TypeError:
-            m[b'Message-Id'] = genmsgid(b'patchbomb')
+            m['Message-Id'] = genmsgid('patchbomb')
         if parent:
-            m[b'In-Reply-To'] = parent
-            m[b'References'] = parent
-        if not parent or b'X-Mercurial-Node' not in m:
-            parent = m[b'Message-Id']
+            m['In-Reply-To'] = parent
+            m['References'] = parent
+        if not parent or 'X-Mercurial-Node' not in m:
+            parent = m['Message-Id']
 
-        m[b'User-Agent'] = b'Mercurial-patchbomb/%s' % util.version()
-        m[b'Date'] = eutil.formatdate(start_time[0], localtime=True)
+        m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version().decode()
+        m['Date'] = eutil.formatdate(start_time[0], localtime=True)
 
         start_time = (start_time[0] + 1, start_time[1])
-        m[b'From'] = sender
-        m[b'To'] = b', '.join(to)
+        m['From'] = sender
+        m['To'] = ', '.join(to)
         if cc:
-            m[b'Cc'] = b', '.join(cc)
+            m['Cc'] = ', '.join(cc)
         if bcc:
-            m[b'Bcc'] = b', '.join(bcc)
+            m['Bcc'] = ', '.join(bcc)
         if replyto:
-            m[b'Reply-To'] = b', '.join(replyto)
-        # Fix up all headers to be native strings.
-        # TODO(durin42): this should probably be cleaned up above in the future.
-        if pycompat.ispy3:
-            for hdr, val in list(m.items()):
-                change = False
-                if isinstance(hdr, bytes):
-                    del m[hdr]
-                    hdr = pycompat.strurl(hdr)
-                    change = True
-                if isinstance(val, bytes):
-                    # header value should be ASCII since it's encoded by
-                    # mail.headencode(), but -n/--test disables it and raw
-                    # value of platform encoding is stored.
-                    val = encoding.strfromlocal(val)
-                    if not change:
-                        # prevent duplicate headers
-                        del m[hdr]
-                    change = True
-                if change:
-                    m[hdr] = val
+            m['Reply-To'] = ', '.join(replyto)
         if opts.get(b'test'):
             ui.status(_(b'displaying '), subj, b' ...\n')
             ui.pager(b'email')
             generator = mail.Generator(ui, mangle_from_=False)
             try:
-                generator.flatten(m, 0)
+                generator.flatten(m, False)
                 ui.write(b'\n')
             except IOError as inst:
                 if inst.errno != errno.EPIPE:
@@ -987,12 +969,11 @@
             progress.update(i, item=subj)
             if not mbox:
                 # Exim does not remove the Bcc field
-                del m[b'Bcc']
+                del m['Bcc']
             fp = stringio()
             generator = mail.Generator(fp, mangle_from_=False)
-            generator.flatten(m, 0)
+            generator.flatten(m, False)
             alldests = to + bcc + cc
-            alldests = [encoding.strfromlocal(d) for d in alldests]
             sendmail(sender_addr, alldests, fp.getvalue())
 
     progress.complete()
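
The `_msgid` hunk above switches to native strings and lets an `HGHOSTNAME`
environment variable override the detected hostname (useful for stable test
output). A standalone sketch of the same shape, assuming `node` is a hex
changeset id and `timestamp` a Unix epoch; the names are illustrative, not
patchbomb's own API::

    import os
    import socket

    def msgid(node, timestamp):
        # Prefer the explicit override, then the fully qualified hostname.
        hostname = os.environ.get('HGHOSTNAME') or socket.getfqdn()
        return '<%s.%d@%s>' % (node, timestamp, hostname)

    print(msgid('a' * 40, 1579600491))
    # e.g. <aaa...aaa.1579600491@host.example.com>
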
--- a/hgext/phabricator.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/phabricator.py	Tue Jan 21 13:14:51 2020 -0500
@@ -11,6 +11,10 @@
 revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
 to update statuses in batch.
 
+A "phabstatus" view for :hg:`show` is also provided; it displays status
+information of Phabricator differentials associated with unfinished
+changesets.
+
 By default, Phabricator requires ``Test Plan`` which might prevent some
 changeset from being sent. The requirement could be disabled by changing
 ``differential.require-test-plan-field`` config server side.
@@ -60,7 +64,10 @@
     encoding,
     error,
     exthelper,
+    graphmod,
     httpconnection as httpconnectionmod,
+    localrepo,
+    logcmdutil,
     match,
     mdiff,
     obsutil,
@@ -80,6 +87,8 @@
     procutil,
     stringutil,
 )
+from . import show
+
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ -93,6 +102,7 @@
 command = eh.command
 configtable = eh.configtable
 templatekeyword = eh.templatekeyword
+uisetup = eh.finaluisetup
 
 # developer config: phabricator.batchsize
 eh.configitem(
@@ -122,6 +132,12 @@
     b'phabricator.desc': b'',
     b'phabricator.drev': b'bold',
     b'phabricator.node': b'',
+    b'phabricator.status.abandoned': b'magenta dim',
+    b'phabricator.status.accepted': b'green bold',
+    b'phabricator.status.closed': b'green',
+    b'phabricator.status.needsreview': b'yellow',
+    b'phabricator.status.needsrevision': b'red',
+    b'phabricator.status.changesplanned': b'red',
 }
 
 _VCR_FLAGS = [
@@ -138,6 +154,44 @@
 ]
 
 
+@eh.wrapfunction(localrepo, "loadhgrc")
+def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
+    """Load ``.arcconfig`` content into a ui instance on repository open.
+    """
+    result = False
+    arcconfig = {}
+
+    try:
+        # json.loads only accepts bytes from 3.6+
+        rawparams = encoding.unifromlocal(wdirvfs.read(b".arcconfig"))
+        # json.loads only returns unicode strings
+        arcconfig = pycompat.rapply(
+            lambda x: encoding.unitolocal(x)
+            if isinstance(x, pycompat.unicode)
+            else x,
+            pycompat.json_loads(rawparams),
+        )
+
+        result = True
+    except ValueError:
+        ui.warn(_(b"invalid JSON in %s\n") % wdirvfs.join(b".arcconfig"))
+    except IOError:
+        pass
+
+    cfg = util.sortdict()
+
+    if b"repository.callsign" in arcconfig:
+        cfg[(b"phabricator", b"callsign")] = arcconfig[b"repository.callsign"]
+
+    if b"phabricator.uri" in arcconfig:
+        cfg[(b"phabricator", b"url")] = arcconfig[b"phabricator.uri"]
+
+    if cfg:
+        ui.applyconfig(cfg, source=wdirvfs.join(b".arcconfig"))
+
+    return orig(ui, wdirvfs, hgvfs, requirements) or result  # Load .hg/hgrc
+
+
 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
     fullflags = flags + _VCR_FLAGS
 
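
The `_loadhgrc` wrapper above folds `.arcconfig` values into the config on
every repository open, so the callsign and URL no longer need duplicating in
hgrc. A rough standalone equivalent of the key mapping, assuming a plain file
path instead of the vfs and skipping the local-encoding round trip the real
hook performs::

    import json

    def arcconfig_to_hgconfig(path):
        """Map .arcconfig JSON keys onto (section, name) config entries."""
        with open(path, 'rb') as fp:
            data = json.loads(fp.read().decode('utf-8'))
        cfg = {}
        if 'repository.callsign' in data:
            cfg[('phabricator', 'callsign')] = data['repository.callsign']
        if 'phabricator.uri' in data:
            cfg[('phabricator', 'url')] = data['phabricator.uri']
        return cfg
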
@@ -167,13 +221,13 @@
         return request
 
     def sanitiseresponse(response):
-        if r'set-cookie' in response[r'headers']:
-            del response[r'headers'][r'set-cookie']
+        if 'set-cookie' in response['headers']:
+            del response['headers']['set-cookie']
         return response
 
     def decorate(fn):
         def inner(*args, **kwargs):
-            cassette = pycompat.fsdecode(kwargs.pop(r'test_vcr', None))
+            cassette = pycompat.fsdecode(kwargs.pop('test_vcr', None))
             if cassette:
                 import hgdemandimport
 
@@ -182,24 +236,24 @@
                     import vcr.stubs as stubs
 
                     vcr = vcrmod.VCR(
-                        serializer=r'json',
+                        serializer='json',
                         before_record_request=sanitiserequest,
                         before_record_response=sanitiseresponse,
                         custom_patches=[
                             (
                                 urlmod,
-                                r'httpconnection',
+                                'httpconnection',
                                 stubs.VCRHTTPConnection,
                             ),
                             (
                                 urlmod,
-                                r'httpsconnection',
+                                'httpsconnection',
                                 stubs.VCRHTTPSConnection,
                             ),
                         ],
                     )
-                    vcr.register_matcher(r'hgmatcher', hgmatcher)
-                    with vcr.use_cassette(cassette, match_on=[r'hgmatcher']):
+                    vcr.register_matcher('hgmatcher', hgmatcher)
+                    with vcr.use_cassette(cassette, match_on=['hgmatcher']):
                         return fn(*args, **kwargs)
             return fn(*args, **kwargs)
 
@@ -389,7 +443,7 @@
     corresponding Differential Revision, and exist in the repo.
     """
     unfi = repo.unfiltered()
-    nodemap = unfi.changelog.nodemap
+    has_node = unfi.changelog.index.has_node
 
     result = {}  # {node: (oldnode?, lastdiff?, drev)}
     toconfirm = {}  # {node: (force, {precnode}, drev)}
@@ -398,17 +452,20 @@
         # For tags like "D123", put them into "toconfirm" to verify later
         precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
         for n in precnodes:
-            if n in nodemap:
+            if has_node(n):
                 for tag in unfi.nodetags(n):
                     m = _differentialrevisiontagre.match(tag)
                     if m:
                         toconfirm[node] = (0, set(precnodes), int(m.group(1)))
-                        continue
-
-        # Check commit message
-        m = _differentialrevisiondescre.search(ctx.description())
-        if m:
-            toconfirm[node] = (1, set(precnodes), int(m.group(r'id')))
+                        break
+                else:
+                    continue  # move to next predecessor
+                break  # found a tag, stop
+        else:
+            # Check commit message
+            m = _differentialrevisiondescre.search(ctx.description())
+            if m:
+                toconfirm[node] = (1, set(precnodes), int(m.group('id')))
 
     # Double check if tags are genuine by collecting all old nodes from
     # Phabricator, and expect precursors overlap with it.
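
The rewritten predecessor scan is all ``for ... else`` plumbing: the inner
``else: continue`` / trailing ``break`` pair forwards an inner ``break`` to
the outer loop, and the outer ``else`` runs the commit-message fallback only
when no predecessor carried a tag (the old code's ``continue`` never reached
the outer loop, so the fallback could clobber a tag match). The control flow
in miniature, with stand-in data::

    items = [['x'], ['y', 'D42'], ['z']]

    for group in items:
        for tag in group:
            if tag.startswith('D'):
                print('found', tag)
                break              # leave the inner loop...
        else:
            continue               # inner loop finished without break
        break                      # ...and the outer one too
    else:
        print('no tag anywhere')   # runs only if the outer loop never broke
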
@@ -454,7 +511,7 @@
             if diffs:
                 lastdiff = max(diffs, key=lambda d: int(d[b'id']))
                 oldnode = getnode(lastdiff)
-                if oldnode and oldnode not in nodemap:
+                if oldnode and not has_node(oldnode):
                     oldnode = None
 
             result[newnode] = (oldnode, lastdiff, drev)
@@ -462,6 +519,29 @@
     return result
 
 
+def getdrevmap(repo, revs):
+    """Return a dict mapping each rev in `revs` to its Differential Revision
+    ID, or None when no association is found.
+    """
+    result = {}
+    for rev in revs:
+        result[rev] = None
+        ctx = repo[rev]
+        # Check commit message
+        m = _differentialrevisiondescre.search(ctx.description())
+        if m:
+            result[rev] = int(m.group('id'))
+            continue
+        # Check tags
+        for tag in repo.nodetags(ctx.node()):
+            m = _differentialrevisiontagre.match(tag)
+            if m:
+                result[rev] = int(m.group(1))
+                break
+
+    return result
+
+
 def getdiff(ctx, diffopts):
     """plain-text diff without header (user, commit message, etc)"""
     output = util.stringio()
@@ -609,26 +689,25 @@
     """
     ui = fctx.repo().ui
     chunks = callconduit(ui, b'file.querychunks', {b'filePHID': fphid})
-    progress = ui.makeprogress(
+    with ui.makeprogress(
         _(b'uploading file chunks'), unit=_(b'chunks'), total=len(chunks)
-    )
-    for chunk in chunks:
-        progress.increment()
-        if chunk[b'complete']:
-            continue
-        bstart = int(chunk[b'byteStart'])
-        bend = int(chunk[b'byteEnd'])
-        callconduit(
-            ui,
-            b'file.uploadchunk',
-            {
-                b'filePHID': fphid,
-                b'byteStart': bstart,
-                b'data': base64.b64encode(fctx.data()[bstart:bend]),
-                b'dataEncoding': b'base64',
-            },
-        )
-    progress.complete()
+    ) as progress:
+        for chunk in chunks:
+            progress.increment()
+            if chunk[b'complete']:
+                continue
+            bstart = int(chunk[b'byteStart'])
+            bend = int(chunk[b'byteEnd'])
+            callconduit(
+                ui,
+                b'file.uploadchunk',
+                {
+                    b'filePHID': fphid,
+                    b'byteStart': bstart,
+                    b'data': base64.b64encode(fctx.data()[bstart:bend]),
+                    b'dataEncoding': b'base64',
+                },
+            )
 
 
 def uploadfile(fctx):
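
Moving `uploadchunks` into a ``with ui.makeprogress(...) as progress:`` block
guarantees the progress bar is completed even when a conduit call raises. The
pattern is an ordinary context manager; a toy version, assuming only that
completion must happen on every exit path::

    import contextlib

    class _progress(object):
        def __init__(self, topic, total):
            self.topic, self.total, self.pos = topic, total, 0

        def increment(self):
            self.pos += 1

        def complete(self):
            print('%s: done (%d/%d)' % (self.topic, self.pos, self.total))

    @contextlib.contextmanager
    def makeprogress(topic, total):
        p = _progress(topic, total)
        try:
            yield p
        finally:
            p.complete()  # runs even if the body raises

    with makeprogress('uploading file chunks', total=3) as progress:
        for _ in range(3):
            progress.increment()
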
@@ -1026,6 +1105,7 @@
     opts = pycompat.byteskwargs(opts)
     revs = list(revs) + opts.get(b'rev', [])
     revs = scmutil.revrange(repo, revs)
+    revs.sort()  # ascending order to preserve topological parent/child in phab
 
     if not revs:
         raise error.Abort(_(b'phabsend requires at least one changeset'))
@@ -1089,7 +1169,7 @@
             # Create a local tag to note the association, if commit message
             # does not have it already
             m = _differentialrevisiondescre.search(ctx.description())
-            if not m or int(m.group(r'id')) != newrevid:
+            if not m or int(m.group('id')) != newrevid:
                 tagname = b'D%d' % newrevid
                 tags.tag(
                     repo,
@@ -1235,6 +1315,7 @@
     b'needsrevision',
     b'closed',
     b'abandoned',
+    b'changesplanned',
 }
 
 
@@ -1636,7 +1717,7 @@
     m = _differentialrevisiondescre.search(ctx.description())
     if m:
         return templateutil.hybriddict(
-            {b'url': m.group(r'url'), b'id': b"D%s" % m.group(r'id'),}
+            {b'url': m.group('url'), b'id': b"D%s" % m.group('id'),}
         )
     else:
         tags = ctx.repo().nodetags(ctx.node())
@@ -1649,3 +1730,68 @@
 
                 return templateutil.hybriddict({b'url': url, b'id': t,})
     return None
+
+
+@eh.templatekeyword(b'phabstatus', requires={b'ctx', b'repo', b'ui'})
+def template_status(context, mapping):
+    """:phabstatus: String. Status of Phabricator differential.
+    """
+    ctx = context.resource(mapping, b'ctx')
+    repo = context.resource(mapping, b'repo')
+    ui = context.resource(mapping, b'ui')
+
+    rev = ctx.rev()
+    try:
+        drevid = getdrevmap(repo, [rev])[rev]
+    except KeyError:
+        return None
+    drevs = callconduit(ui, b'differential.query', {b'ids': [drevid]})
+    for drev in drevs:
+        if int(drev[b'id']) == drevid:
+            return templateutil.hybriddict(
+                {b'url': drev[b'uri'], b'status': drev[b'statusName'],}
+            )
+    return None
+
+
+@show.showview(b'phabstatus', csettopic=b'work')
+def phabstatusshowview(ui, repo, displayer):
+    """Phabricator differential status"""
+    revs = repo.revs('sort(_underway(), topo)')
+    drevmap = getdrevmap(repo, revs)
+    unknownrevs, drevids, revsbydrevid = [], set([]), {}
+    for rev, drevid in pycompat.iteritems(drevmap):
+        if drevid is not None:
+            drevids.add(drevid)
+            revsbydrevid.setdefault(drevid, set([])).add(rev)
+        else:
+            unknownrevs.append(rev)
+
+    drevs = callconduit(ui, b'differential.query', {b'ids': list(drevids)})
+    drevsbyrev = {}
+    for drev in drevs:
+        for rev in revsbydrevid[int(drev[b'id'])]:
+            drevsbyrev[rev] = drev
+
+    def phabstatus(ctx):
+        drev = drevsbyrev[ctx.rev()]
+        status = ui.label(
+            b'%(statusName)s' % drev,
+            b'phabricator.status.%s' % _getstatusname(drev),
+        )
+        ui.write(b"\n%s %s\n" % (drev[b'uri'], status))
+
+    revs -= smartset.baseset(unknownrevs)
+    revdag = graphmod.dagwalker(repo, revs)
+
+    ui.setconfig(b'experimental', b'graphshorten', True)
+    displayer._exthook = phabstatus
+    nodelen = show.longestshortest(repo, revs)
+    logcmdutil.displaygraph(
+        ui,
+        repo,
+        revdag,
+        displayer,
+        graphmod.asciiedges,
+        props={b'nodelen': nodelen},
+    )
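
Both `getdrevmap` and the `phabstatus` template keyword recover a
Differential Revision number from a commit description or a local ``D123``
tag. The two regexes are defined elsewhere in the file; an approximation of
the scan with guessed patterns (the real `_differentialrevisiondescre` and
`_differentialrevisiontagre` differ in detail)::

    import re

    descre = re.compile(
        r'Differential Revision:\s*(?P<url>.*D(?P<id>\d+))$', re.MULTILINE)
    tagre = re.compile(r'\AD(\d+)\Z')

    def drev_for(description, tags):
        m = descre.search(description)
        if m:
            return int(m.group('id'))
        for tag in tags:
            m = tagre.match(tag)
            if m:
                return int(m.group(1))
        return None

    print(drev_for('fix\n\nDifferential Revision: https://phab/D123', []))  # 123
    print(drev_for('wip', ['tip', 'D45']))                                  # 45
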
--- a/hgext/rebase.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/rebase.py	Tue Jan 21 13:14:51 2020 -0500
@@ -46,6 +46,7 @@
     repair,
     revset,
     revsetlang,
+    rewriteutil,
     scmutil,
     smartset,
     state as statemod,
@@ -393,17 +394,13 @@
             return _nothingtorebase()
 
         rebaseset = destmap.keys()
-        allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
-        if not (self.keepf or allowunstable) and self.repo.revs(
-            b'first(children(%ld) - %ld)', rebaseset, rebaseset
-        ):
-            raise error.Abort(
-                _(
-                    b"can't remove original changesets with"
-                    b" unrebased descendants"
-                ),
-                hint=_(b'use --keep to keep original changesets'),
-            )
+        if not self.keepf:
+            try:
+                rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
+            except error.Abort as e:
+                if e.hint is None:
+                    e.hint = _(b'use --keep to keep original changesets')
+                raise e
 
         result = buildstate(self.repo, destmap, self.collapsef)
 
@@ -412,13 +409,6 @@
             self.ui.status(_(b'nothing to rebase\n'))
             return _nothingtorebase()
 
-        for root in self.repo.set(b'roots(%ld)', rebaseset):
-            if not self.keepf and not root.mutable():
-                raise error.Abort(
-                    _(b"can't rebase public changeset %s") % root,
-                    hint=_(b"see 'hg help phases' for details"),
-                )
-
         (self.originalwd, self.destmap, self.state) = result
         if self.collapsef:
             dests = set(self.destmap.values())
@@ -797,7 +787,6 @@
                 cleanup = False
 
             if cleanup:
-                shouldupdate = False
                 if rebased:
                     strippoints = [
                         c.node() for c in repo.set(b'roots(%ld)', rebased)
@@ -809,7 +798,7 @@
                 shouldupdate = repo[b'.'].rev() in updateifonnodes
 
                 # Update away from the rebase if necessary
-                if shouldupdate or needupdate(repo, self.state):
+                if shouldupdate:
                     mergemod.update(
                         repo, self.originalwd, branchmerge=False, force=True
                     )
@@ -1019,20 +1008,16 @@
     """
     opts = pycompat.byteskwargs(opts)
     inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
-    dryrun = opts.get(b'dry_run')
-    confirm = opts.get(b'confirm')
-    selactions = [k for k in [b'abort', b'stop', b'continue'] if opts.get(k)]
-    if len(selactions) > 1:
-        raise error.Abort(
-            _(b'cannot use --%s with --%s') % tuple(selactions[:2])
+    action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
+    if action:
+        cmdutil.check_incompatible_arguments(
+            opts, action, b'confirm', b'dry_run'
         )
-    action = selactions[0] if selactions else None
-    if dryrun and action:
-        raise error.Abort(_(b'cannot specify both --dry-run and --%s') % action)
-    if confirm and action:
-        raise error.Abort(_(b'cannot specify both --confirm and --%s') % action)
-    if dryrun and confirm:
-        raise error.Abort(_(b'cannot specify both --confirm and --dry-run'))
+        cmdutil.check_incompatible_arguments(
+            opts, action, b'rev', b'source', b'base', b'dest'
+        )
+    cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
+    cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
 
     if action or repo.currenttransaction() is not None:
         # in-memory rebase is not compatible with resuming rebases.
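
Several hunks in this changeset replace hand-rolled flag-conflict checks with
`cmdutil.check_at_most_one_arg`. A plausible sketch of its behaviour,
returning the one flag that is set (or None) and refusing combinations; the
real helper lives in `mercurial/cmdutil.py`, raises `error.Abort`, and works
on byte-string keys::

    def check_at_most_one_arg(opts, *args):
        """Return the single member of `args` that is set in `opts`, or None."""
        previous = None
        for x in args:
            if opts.get(x):
                if previous:
                    raise ValueError(
                        'cannot specify both --%s and --%s' % (previous, x))
                previous = x
        return previous

    opts = {'abort': True, 'stop': False, 'continue': False}
    print(check_at_most_one_arg(opts, 'abort', 'stop', 'continue'))  # 'abort'
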
@@ -1041,16 +1026,16 @@
         inmemory = False
 
     if opts.get(b'auto_orphans'):
-        for key in opts:
-            if key != b'auto_orphans' and opts.get(key):
-                raise error.Abort(
-                    _(b'--auto-orphans is incompatible with %s') % (b'--' + key)
-                )
+        disallowed_opts = set(opts) - {b'auto_orphans'}
+        cmdutil.check_incompatible_arguments(
+            opts, b'auto_orphans', *disallowed_opts
+        )
+
         userrevs = list(repo.revs(opts.get(b'auto_orphans')))
         opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
         opts[b'dest'] = b'_destautoorphanrebase(SRC)'
 
-    if dryrun or confirm:
+    if opts.get(b'dry_run') or opts.get(b'confirm'):
         return _dryrunrebase(ui, repo, action, opts)
     elif action == b'stop':
         rbsrt = rebaseruntime(repo, ui)
@@ -1071,10 +1056,9 @@
                         b'changesets'
                     ),
                 )
-            if needupdate(repo, rbsrt.state):
-                # update to the current working revision
-                # to clear interrupted merge
-                hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
+            # update to the current working revision
+            # to clear interrupted merge
+            hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
             rbsrt._finishrebase()
             return 0
     elif inmemory:
@@ -1167,14 +1151,6 @@
 ):
     assert action != b'stop'
     with repo.wlock(), repo.lock():
-        # Validate input and define rebasing points
-        destf = opts.get(b'dest', None)
-        srcf = opts.get(b'source', None)
-        basef = opts.get(b'base', None)
-        revf = opts.get(b'rev', [])
-        # search default destination in this space
-        # used in the 'hg pull --rebase' case, see issue 5214.
-        destspace = opts.get(b'_destspace')
         if opts.get(b'interactive'):
             try:
                 if extensions.find(b'histedit'):
@@ -1199,10 +1175,6 @@
                 raise error.Abort(
                     _(b'cannot use collapse with continue or abort')
                 )
-            if srcf or basef or destf:
-                raise error.Abort(
-                    _(b'abort and continue do not allow specifying revisions')
-                )
             if action == b'abort' and opts.get(b'tool', False):
                 ui.warn(_(b'tool option will be ignored\n'))
             if action == b'continue':
@@ -1215,14 +1187,17 @@
             if retcode is not None:
                 return retcode
         else:
+            # search default destination in this space
+            # used in the 'hg pull --rebase' case, see issue 5214.
+            destspace = opts.get(b'_destspace')
             destmap = _definedestmap(
                 ui,
                 repo,
                 inmemory,
-                destf,
-                srcf,
-                basef,
-                revf,
+                opts.get(b'dest', None),
+                opts.get(b'source', None),
+                opts.get(b'base', None),
+                opts.get(b'rev', []),
                 destspace=destspace,
             )
             retcode = rbsrt._preparenewrebase(destmap)
@@ -1267,15 +1242,9 @@
 
     # destspace is here to work around issues with `hg pull --rebase` see
     # issue5214 for details
-    if srcf and basef:
-        raise error.Abort(_(b'cannot specify both a source and a base'))
-    if revf and basef:
-        raise error.Abort(_(b'cannot specify both a revision and a base'))
-    if revf and srcf:
-        raise error.Abort(_(b'cannot specify both a revision and a source'))
 
+    cmdutil.checkunfinished(repo)
     if not inmemory:
-        cmdutil.checkunfinished(repo)
         cmdutil.bailifchanged(repo)
 
     if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
@@ -1460,13 +1429,13 @@
 
     # By convention, ``extra['branch']`` (set by extrafn) clobbers
     # ``branch`` (used when passing ``--keepbranches``).
-    branch = repo[p1].branch()
+    branch = None
     if b'branch' in extra:
         branch = extra[b'branch']
 
+    wctx.setparents(repo[p1].node(), repo[p2].node())
     memctx = wctx.tomemctx(
         commitmsg,
-        parents=(p1, p2),
         date=date,
         extra=extra,
         user=user,
@@ -1497,14 +1466,15 @@
 
 
 def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
-    b'Rebase a single revision rev on top of p1 using base as merge ancestor'
+    """Rebase a single revision rev on top of p1 using base as merge ancestor"""
     # Merge phase
     # Update to destination and merge it with local
+    p1ctx = repo[p1]
     if wctx.isinmemory():
-        wctx.setbase(repo[p1])
+        wctx.setbase(p1ctx)
     else:
         if repo[b'.'].rev() != p1:
-            repo.ui.debug(b" update to %d:%s\n" % (p1, repo[p1]))
+            repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
             mergemod.update(repo, p1, branchmerge=False, force=True)
         else:
             repo.ui.debug(b" already in destination\n")
@@ -1512,31 +1482,30 @@
         # as well as other data we litter on it in other places.
         wctx = repo[None]
         repo.dirstate.write(repo.currenttransaction())
-    repo.ui.debug(b" merge against %d:%s\n" % (rev, repo[rev]))
+    ctx = repo[rev]
+    repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx))
     if base is not None:
         repo.ui.debug(b"   detach base %d:%s\n" % (base, repo[base]))
-    # When collapsing in-place, the parent is the common ancestor, we
-    # have to allow merging with it.
+
+    # See explanation in merge.graft()
+    mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
     stats = mergemod.update(
         repo,
         rev,
         branchmerge=True,
         force=True,
         ancestor=base,
-        mergeancestor=collapse,
+        mergeancestor=mergeancestor,
         labels=[b'dest', b'source'],
         wc=wctx,
     )
     if collapse:
-        copies.duplicatecopies(repo, wctx, rev, dest)
+        copies.graftcopies(wctx, ctx, repo[dest])
     else:
         # If we're not using --collapse, we need to
         # duplicate copies between the revision we're
-        # rebasing and its first parent, but *not*
-        # duplicate any copies that have already been
-        # performed in the destination.
-        p1rev = repo[rev].p1().rev()
-        copies.duplicatecopies(repo, wctx, rev, p1rev, skiprev=dest)
+        # rebasing and its first parent.
+        copies.graftcopies(wctx, ctx, ctx.p1())
     return stats
 
 
@@ -1643,10 +1612,11 @@
 def successorrevs(unfi, rev):
     """yield revision numbers for successors of rev"""
     assert unfi.filtername is None
-    nodemap = unfi.changelog.nodemap
+    get_rev = unfi.changelog.index.get_rev
     for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
-        if s in nodemap:
-            yield nodemap[s]
+        r = get_rev(s)
+        if r is not None:
+            yield r
 
 
 def defineparents(repo, rev, destmap, state, skipped, obsskipped):
@@ -1790,7 +1760,7 @@
     # But our merge base candidates (D and E in above case) could still be
     # better than the default (ancestor(F, Z) == null). Therefore still
     # pick one (so choose p1 above).
-    if sum(1 for b in bases if b != nullrev) > 1:
+    if sum(1 for b in set(bases) if b != nullrev) > 1:
         unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
         for i, base in enumerate(bases):
             if base == nullrev:
@@ -1852,7 +1822,7 @@
 
 
 def isagitpatch(repo, patchname):
-    b'Return true if the given patch is in git format'
+    """Return true if the given patch is in git format"""
     mqpatch = os.path.join(repo.mq.path, patchname)
     for line in patch.linereader(open(mqpatch, b'rb')):
         if line.startswith(b'diff --git'):
@@ -1861,7 +1831,7 @@
 
 
 def updatemq(repo, state, skipped, **opts):
-    b'Update rebased mq patches - finalize and then import them'
+    """Update rebased mq patches - finalize and then import them"""
     mqrebase = {}
     mq = repo.mq
     original_series = mq.fullseries[:]
@@ -1915,7 +1885,7 @@
 
 
 def storecollapsemsg(repo, collapsemsg):
-    b'Store the collapse message to allow recovery'
+    """Store the collapse message to allow recovery"""
     collapsemsg = collapsemsg or b''
     f = repo.vfs(b"last-message.txt", b"w")
     f.write(b"%s\n" % collapsemsg)
@@ -1923,12 +1893,12 @@
 
 
 def clearcollapsemsg(repo):
-    b'Remove collapse message file'
+    """Remove collapse message file"""
     repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
 
 
 def restorecollapsemsg(repo, isabort):
-    b'Restore previously stored collapse message'
+    """Restore previously stored collapse message"""
     try:
         f = repo.vfs(b"last-message.txt")
         collapsemsg = f.readline().strip()
@@ -1945,7 +1915,7 @@
 
 
 def clearstatus(repo):
-    b'Remove the status files'
+    """Remove the status files"""
     # Make sure the active transaction won't write the state file
     tr = repo.currenttransaction()
     if tr:
@@ -1953,25 +1923,6 @@
     repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
 
 
-def needupdate(repo, state):
-    '''check whether we should `update --clean` away from a merge, or if
-    somehow the working dir got forcibly updated, e.g. by older hg'''
-    parents = [p.rev() for p in repo[None].parents()]
-
-    # Are we in a merge state at all?
-    if len(parents) < 2:
-        return False
-
-    # We should be standing on the first as-of-yet unrebased commit.
-    firstunrebased = min(
-        [old for old, new in pycompat.iteritems(state) if new == nullrev]
-    )
-    if firstunrebased in parents:
-        return True
-
-    return False
-
-
 def sortsource(destmap):
     """yield source revisions in an order that we only rebase things once
 
@@ -2126,16 +2077,16 @@
 
 
 def pullrebase(orig, ui, repo, *args, **opts):
-    b'Call rebase after pull if the latter has been invoked with --rebase'
-    if opts.get(r'rebase'):
+    """Call rebase after pull if the latter has been invoked with --rebase"""
+    if opts.get('rebase'):
         if ui.configbool(b'commands', b'rebase.requiredest'):
             msg = _(b'rebase destination required by configuration')
             hint = _(b'use hg pull followed by hg rebase -d DEST')
             raise error.Abort(msg, hint=hint)
 
         with repo.wlock(), repo.lock():
-            if opts.get(r'update'):
-                del opts[r'update']
+            if opts.get('update'):
+                del opts['update']
                 ui.debug(
                     b'--update and --rebase are not compatible, ignoring '
                     b'the update flag\n'
@@ -2165,15 +2116,15 @@
             if revspostpull > revsprepull:
                 # --rev option from pull conflict with rebase own --rev
                 # dropping it
-                if r'rev' in opts:
-                    del opts[r'rev']
+                if 'rev' in opts:
+                    del opts['rev']
                 # positional argument from pull conflicts with rebase's own
                 # --source.
-                if r'source' in opts:
-                    del opts[r'source']
+                if 'source' in opts:
+                    del opts['source']
                 # revsprepull is the len of the repo, not revnum of tip.
                 destspace = list(repo.changelog.revs(start=revsprepull))
-                opts[r'_destspace'] = destspace
+                opts['_destspace'] = destspace
                 try:
                     rebase(ui, repo, **opts)
                 except error.NoMergeDestAbort:
@@ -2187,7 +2138,7 @@
                         # with warning and trumpets
                         commands.update(ui, repo)
     else:
-        if opts.get(r'tool'):
+        if opts.get('tool'):
             raise error.Abort(_(b'--tool can only be used with --rebase'))
         ret = orig(ui, repo, *args, **opts)
 
@@ -2217,7 +2168,7 @@
 
     assert repo.filtername is None
     cl = repo.changelog
-    nodemap = cl.nodemap
+    get_rev = cl.index.get_rev
     extinctrevs = set(repo.revs(b'extinct()'))
     for srcrev in rebaseobsrevs:
         srcnode = cl.node(srcrev)
@@ -2225,7 +2176,8 @@
         successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
         # obsutil.allsuccessors includes node itself
         successors.remove(srcnode)
-        succrevs = {nodemap[s] for s in successors if s in nodemap}
+        succrevs = {get_rev(s) for s in successors}
+        succrevs.discard(None)
         if succrevs.issubset(extinctrevs):
             # all successors are extinct
             obsoleteextinctsuccessors.add(srcrev)
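
Two hunks above swap `cl.nodemap` for `cl.index.get_rev`, which returns None
for unknown nodes instead of needing a separate membership test; the
`succrevs.discard(None)` line then drops all the misses at once. The same
shape against a plain dict::

    revs_by_node = {'n1': 0, 'n2': 1}
    successors = ['n1', 'n3']

    # nodemap style: test membership, then look up
    succrevs = {revs_by_node[s] for s in successors if s in revs_by_node}

    # get_rev style: one lookup per node, then drop the sentinel
    get_rev = revs_by_node.get
    succrevs = {get_rev(s) for s in successors}
    succrevs.discard(None)
    print(succrevs)  # {0}
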
--- a/hgext/record.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/record.py	Tue Jan 21 13:14:51 2020 -0500
@@ -72,14 +72,14 @@
             _(b'running non-interactively, use %s instead') % b'commit'
         )
 
-    opts[r"interactive"] = True
+    opts["interactive"] = True
     overrides = {(b'experimental', b'crecord'): False}
     with ui.configoverride(overrides, b'record'):
         return commands.commit(ui, repo, *pats, **opts)
 
 
 def qrefresh(origfn, ui, repo, *pats, **opts):
-    if not opts[r'interactive']:
+    if not opts['interactive']:
         return origfn(ui, repo, *pats, **opts)
 
     mq = extensions.find(b'mq')
@@ -123,7 +123,7 @@
     repo.mq.checkpatchname(patch)
 
     def committomq(ui, repo, *pats, **opts):
-        opts[r'checkname'] = False
+        opts['checkname'] = False
         mq.new(ui, repo, patch, *pats, **opts)
 
     overrides = {(b'experimental', b'crecord'): False}
@@ -142,7 +142,7 @@
 
 
 def qnew(origfn, ui, repo, patch, *args, **opts):
-    if opts[r'interactive']:
+    if opts['interactive']:
         return _qrecord(None, ui, repo, patch, *args, **opts)
     return origfn(ui, repo, patch, *args, **opts)
 
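
The record.py hunks, like much of this changeset, only drop ``r''`` prefixes
from ``**opts`` keys. With no backslash in the literal the prefix never
changed the value; it was a Python 2 porting crutch, and removing it is a
no-op::

    print(r'interactive' == 'interactive')  # True: identical strings
    print(len(r'\n'), len('\n'))            # 2 1: the prefix only matters
                                            # when escapes are present
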
--- a/hgext/releasenotes.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/releasenotes.py	Tue Jan 21 13:14:51 2020 -0500
@@ -20,6 +20,7 @@
 from mercurial.i18n import _
 from mercurial.pycompat import open
 from mercurial import (
+    cmdutil,
     config,
     error,
     minirst,
@@ -653,14 +654,9 @@
     opts = pycompat.byteskwargs(opts)
     sections = releasenotessections(ui, repo)
 
-    listflag = opts.get(b'list')
+    cmdutil.check_incompatible_arguments(opts, b'list', b'rev', b'check')
 
-    if listflag and opts.get(b'rev'):
-        raise error.Abort(_(b'cannot use both \'--list\' and \'--rev\''))
-    if listflag and opts.get(b'check'):
-        raise error.Abort(_(b'cannot use both \'--list\' and \'--check\''))
-
-    if listflag:
+    if opts.get(b'list'):
         return _getadmonitionlist(ui, sections)
 
     rev = opts.get(b'rev')
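
`check_incompatible_arguments` generalizes the same consolidation: abort
whenever the first option appears together with any of the others. A sketch
consistent with the call above, reusing the `check_at_most_one_arg` sketched
after the rebase hunks; the real signature in `mercurial/cmdutil.py` may
differ::

    def check_incompatible_arguments(opts, first, *others):
        """Refuse `first` combined with any option in `others`."""
        for other in others:
            check_at_most_one_arg(opts, first, other)

    opts = {'list': True, 'rev': None, 'check': False}
    check_incompatible_arguments(opts, 'list', 'rev', 'check')  # only --list: ok
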
--- a/hgext/remotefilelog/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -288,7 +288,7 @@
 
     # Prevent 'hg manifest --all'
     def _manifest(orig, ui, repo, *args, **opts):
-        if isenabled(repo) and opts.get(r'all'):
+        if isenabled(repo) and opts.get('all'):
             raise error.Abort(_(b"--all is not supported in a shallow repo"))
 
         return orig(ui, repo, *args, **opts)
@@ -344,7 +344,7 @@
 
 
 def cloneshallow(orig, ui, repo, *args, **opts):
-    if opts.get(r'shallow'):
+    if opts.get('shallow'):
         repos = []
 
         def pull_shallow(orig, self, *args, **kwargs):
@@ -381,13 +381,9 @@
                 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                     opts = {}
                     if repo.includepattern:
-                        opts[r'includepattern'] = b'\0'.join(
-                            repo.includepattern
-                        )
+                        opts['includepattern'] = b'\0'.join(repo.includepattern)
                     if repo.excludepattern:
-                        opts[r'excludepattern'] = b'\0'.join(
-                            repo.excludepattern
-                        )
+                        opts['excludepattern'] = b'\0'.join(repo.excludepattern)
                     return remote._callstream(b'stream_out_shallow', **opts)
                 else:
                     return orig()
@@ -424,7 +420,7 @@
     try:
         orig(ui, repo, *args, **opts)
     finally:
-        if opts.get(r'shallow'):
+        if opts.get('shallow'):
             for r in repos:
                 if util.safehasattr(r, b'fileservice'):
                     r.fileservice.close()
@@ -723,9 +719,9 @@
         remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
     )
 
-    def changelogadd(orig, self, *args):
+    def changelogadd(orig, self, *args, **kwargs):
         oldlen = len(self)
-        node = orig(self, *args)
+        node = orig(self, *args, **kwargs)
         newlen = len(self)
         if oldlen != newlen:
             for oldargs in pendingfilecommits:
@@ -991,14 +987,14 @@
     if not isenabled(repo):
         return orig(ui, repo, *pats, **opts)
 
-    follow = opts.get(r'follow')
-    revs = opts.get(r'rev')
+    follow = opts.get('follow')
+    revs = opts.get('rev')
     if pats:
         # Force slowpath for non-follow patterns and follows that start from
         # non-working-copy-parent revs.
         if not follow or revs:
             # This forces the slowpath
-            opts[r'removed'] = True
+            opts['removed'] = True
 
         # If this is a non-follow log without any revs specified, recommend that
         # the user add -f to speed it up.
@@ -1067,7 +1063,7 @@
     # update a revset with a date limit
     bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
 
-    def anon():
+    def anon(unused_success):
         if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
             return
         repo.ranprefetch = True
@@ -1268,18 +1264,18 @@
     _(b'hg repack [OPTIONS]'),
 )
 def repack_(ui, repo, *pats, **opts):
-    if opts.get(r'background'):
+    if opts.get('background'):
         repackmod.backgroundrepack(
             repo,
-            incremental=opts.get(r'incremental'),
-            packsonly=opts.get(r'packsonly', False),
+            incremental=opts.get('incremental'),
+            packsonly=opts.get('packsonly', False),
         )
         return
 
-    options = {b'packsonly': opts.get(r'packsonly')}
+    options = {b'packsonly': opts.get('packsonly')}
 
     try:
-        if opts.get(r'incremental'):
+        if opts.get('incremental'):
             repackmod.incrementalrepack(repo, options=options)
         else:
             repackmod.fullrepack(repo, options=options)
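
The `changelogadd` fix is easy to miss: the wrapper now forwards ``**kwargs``
as well as ``*args``, since a wrapper that swallows keywords breaks any
caller that passes them. In miniature::

    def wrap(orig):
        def wrapper(*args, **kwargs):
            # forward keywords too; dropping **kwargs here would raise
            # TypeError for keyword-passing callers
            return orig(*args, **kwargs)
        return wrapper

    @wrap
    def add(a, b=0):
        return a + b

    print(add(1, b=2))  # 3
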
--- a/hgext/remotefilelog/basepack.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/basepack.py	Tue Jan 21 13:14:51 2020 -0500
@@ -2,7 +2,6 @@
 
 import collections
 import errno
-import hashlib
 import mmap
 import os
 import struct
@@ -20,9 +19,10 @@
     util,
     vfs as vfsmod,
 )
+from mercurial.utils import hashutil
 from . import shallowutil
 
-osutil = policy.importmod(r'osutil')
+osutil = policy.importmod('osutil')
 
 # The pack version supported by this implementation. This will need to be
 # rev'd whenever the byte format changes. Ex: changing the fanout prefix,
@@ -390,9 +390,9 @@
         self.idxfp, self.idxpath = opener.mkstemp(
             suffix=self.INDEXSUFFIX + b'-tmp'
         )
-        self.packfp = os.fdopen(self.packfp, r'wb+')
-        self.idxfp = os.fdopen(self.idxfp, r'wb+')
-        self.sha = hashlib.sha1()
+        self.packfp = os.fdopen(self.packfp, 'wb+')
+        self.idxfp = os.fdopen(self.idxfp, 'wb+')
+        self.sha = hashutil.sha1()
         self._closed = False
 
         # The opener provides no way of doing permission fixup on files created
@@ -530,11 +530,11 @@
 
 class indexparams(object):
     __slots__ = (
-        r'fanoutprefix',
-        r'fanoutstruct',
-        r'fanoutcount',
-        r'fanoutsize',
-        r'indexstart',
+        'fanoutprefix',
+        'fanoutstruct',
+        'fanoutcount',
+        'fanoutsize',
+        'indexstart',
     )
 
     def __init__(self, prefixsize, version):
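
The mechanical `hashlib.sha1` → `hashutil.sha1` swap, repeated across the
remotefilelog files below, routes every SHA-1 call through one module so the
choice of implementation (for example a collision-detecting variant, when
available) is made in a single place. A plausible shape for such a shim; the
vendored module name is hypothetical and the real fallback logic in
`mercurial/utils/hashutil.py` may differ::

    import hashlib

    try:
        from thirdparty import sha1dc  # hypothetical vendored implementation
        sha1 = sha1dc.sha1
    except ImportError:
        sha1 = hashlib.sha1            # standard library fallback

    print(sha1(b'path/to/file').hexdigest())
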
--- a/hgext/remotefilelog/basestore.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/basestore.py	Tue Jan 21 13:14:51 2020 -0500
@@ -1,7 +1,6 @@
 from __future__ import absolute_import
 
 import errno
-import hashlib
 import os
 import shutil
 import stat
@@ -15,6 +14,7 @@
     pycompat,
     util,
 )
+from mercurial.utils import hashutil
 from . import (
     constants,
     shallowutil,
@@ -166,7 +166,7 @@
 
         # Start with a full manifest, since it'll cover the majority of files
         for filename in self.repo[b'tip'].manifest():
-            sha = hashlib.sha1(filename).digest()
+            sha = hashutil.sha1(filename).digest()
             if sha in missingfilename:
                 filenames[filename] = sha
                 missingfilename.discard(sha)
@@ -178,7 +178,7 @@
                 break
             files = cl.readfiles(cl.node(rev))
             for filename in files:
-                sha = hashlib.sha1(filename).digest()
+                sha = hashutil.sha1(filename).digest()
                 if sha in missingfilename:
                     filenames[filename] = sha
                     missingfilename.discard(sha)
@@ -225,7 +225,7 @@
             data = shallowutil.readfile(filepath)
             if self._validatecache and not self._validatedata(data, filepath):
                 if self._validatecachelog:
-                    with open(self._validatecachelog, b'a+') as f:
+                    with open(self._validatecachelog, b'ab+') as f:
                         f.write(b"corrupt %s during read\n" % filepath)
                 os.rename(filepath, filepath + b".corrupt")
                 raise KeyError(b"corrupt local cache file %s" % filepath)
@@ -420,10 +420,10 @@
         # throw a KeyError, try this many times with a full refresh between
         # attempts. A repack operation may have moved data from one store to
         # another while we were running.
-        self.numattempts = kwargs.get(r'numretries', 0) + 1
+        self.numattempts = kwargs.get('numretries', 0) + 1
         # If not-None, call this function on every retry and if the attempts are
         # exhausted.
-        self.retrylog = kwargs.get(r'retrylog', None)
+        self.retrylog = kwargs.get('retrylog', None)
 
     def markforrefresh(self):
         for store in self.stores:
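
The one-character ``b'a+'`` → ``b'ab+'`` fix matters on Python 3: without
``b`` the mode is text and writing bytes raises TypeError, so the
corrupt-cache log was never written. A portable reproduction, assuming only a
writable temp directory::

    import os
    import tempfile

    path = os.path.join(tempfile.gettempdir(), 'validatecache.log')
    try:
        with open(path, 'a+') as f:       # text mode, like the old b'a+'
            f.write(b'corrupt entry\n')   # Python 3: TypeError
    except TypeError as err:
        print('text mode rejects bytes:', err)

    with open(path, 'ab+') as f:          # binary append, like b'ab+'
        f.write(b'corrupt entry\n')       # fine
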
--- a/hgext/remotefilelog/contentstore.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/contentstore.py	Tue Jan 21 13:14:51 2020 -0500
@@ -40,12 +40,12 @@
         super(unioncontentstore, self).__init__(*args, **kwargs)
 
         self.stores = args
-        self.writestore = kwargs.get(r'writestore')
+        self.writestore = kwargs.get('writestore')
 
         # If allowincomplete==True then the union store can return partial
         # delta chains, otherwise it will throw a KeyError if a full
         # deltachain can't be found.
-        self.allowincomplete = kwargs.get(r'allowincomplete', False)
+        self.allowincomplete = kwargs.get('allowincomplete', False)
 
     def get(self, name, node):
         """Fetches the full text revision contents of the given name+node pair.
--- a/hgext/remotefilelog/debugcommands.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/debugcommands.py	Tue Jan 21 13:14:51 2020 -0500
@@ -6,7 +6,6 @@
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 
-import hashlib
 import os
 import zlib
 
@@ -21,6 +20,7 @@
     pycompat,
     revlog,
 )
+from mercurial.utils import hashutil
 from . import (
     constants,
     datapack,
@@ -32,7 +32,7 @@
 
 
 def debugremotefilelog(ui, path, **opts):
-    decompress = opts.get(r'decompress')
+    decompress = opts.get('decompress')
 
     size, firstnode, mapping = parsefileblob(path, decompress)
 
@@ -61,7 +61,7 @@
 
 def buildtemprevlog(repo, file):
     # get filename key
-    filekey = nodemod.hex(hashlib.sha1(file).digest())
+    filekey = nodemod.hex(hashutil.sha1(file).digest())
     filedir = os.path.join(repo.path, b'store/data', filekey)
 
     # sort all entries based on linkrev
@@ -101,9 +101,9 @@
 def debugindex(orig, ui, repo, file_=None, **opts):
     """dump the contents of an index file"""
     if (
-        opts.get(r'changelog')
-        or opts.get(r'manifest')
-        or opts.get(r'dir')
+        opts.get('changelog')
+        or opts.get('manifest')
+        or opts.get('dir')
         or not shallowutil.isenabled(repo)
         or not repo.shallowmatch(file_)
     ):
@@ -199,7 +199,7 @@
 
 
 def verifyremotefilelog(ui, path, **opts):
-    decompress = opts.get(r'decompress')
+    decompress = opts.get('decompress')
 
     for root, dirs, files in os.walk(path):
         for file in files:
@@ -262,13 +262,13 @@
             path = path[: path.index(b'.data')]
         ui.write(b"%s:\n" % path)
         dpack = datapack.datapack(path)
-        node = opts.get(r'node')
+        node = opts.get('node')
         if node:
             deltachain = dpack.getdeltachain(b'', bin(node))
             dumpdeltachain(ui, deltachain, **opts)
             return
 
-        if opts.get(r'long'):
+        if opts.get('long'):
             hashformatter = hex
             hashlen = 42
         else:
@@ -421,7 +421,7 @@
             % (
                 hashformatter(node),
                 hashformatter(deltabasenode),
-                nodemod.hex(hashlib.sha1(delta).digest()),
+                nodemod.hex(hashutil.sha1(delta).digest()),
                 len(delta),
             )
         )
--- a/hgext/remotefilelog/fileserverclient.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/fileserverclient.py	Tue Jan 21 13:14:51 2020 -0500
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import hashlib
 import io
 import os
 import threading
@@ -25,7 +24,10 @@
     util,
     wireprotov1peer,
 )
-from mercurial.utils import procutil
+from mercurial.utils import (
+    hashutil,
+    procutil,
+)
 
 from . import (
     constants,
@@ -45,12 +47,12 @@
 
 
 def getcachekey(reponame, file, id):
-    pathhash = node.hex(hashlib.sha1(file).digest())
+    pathhash = node.hex(hashutil.sha1(file).digest())
     return os.path.join(reponame, pathhash[:2], pathhash[2:], id)
 
 
 def getlocalkey(file, id):
-    pathhash = node.hex(hashlib.sha1(file).digest())
+    pathhash = node.hex(hashutil.sha1(file).digest())
     return os.path.join(pathhash, id)
 
 
@@ -663,5 +665,5 @@
         self.ui.log(
             b'remotefilelog',
             b'excess remotefilelog fetching:\n%s\n',
-            b''.join(traceback.format_stack()),
+            b''.join(pycompat.sysbytes(s) for s in traceback.format_stack()),
         )
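
`getcachekey` shards the shared cache by the first two hex digits of the
filename hash, so no single directory accumulates every entry. The helper,
restated with plain `hashlib` and str paths for readability::

    import hashlib
    import os

    def getcachekey(reponame, file, id):
        pathhash = hashlib.sha1(file).hexdigest()
        return os.path.join(reponame, pathhash[:2], pathhash[2:], id)

    print(getcachekey('myrepo', b'mercurial/util.py', 'deadbeef'))
    # myrepo/<2 hex chars>/<38 hex chars>/deadbeef
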
--- a/hgext/remotefilelog/historypack.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/historypack.py	Tue Jan 21 13:14:51 2020 -0500
@@ -1,6 +1,5 @@
 from __future__ import absolute_import
 
-import hashlib
 import struct
 
 from mercurial.node import hex, nullid
@@ -8,6 +7,7 @@
     pycompat,
     util,
 )
+from mercurial.utils import hashutil
 from . import (
     basepack,
     constants,
@@ -197,7 +197,7 @@
 
     def _findsection(self, name):
         params = self.params
-        namehash = hashlib.sha1(name).digest()
+        namehash = hashutil.sha1(name).digest()
         fanoutkey = struct.unpack(
             params.fanoutstruct, namehash[: params.fanoutprefix]
         )[0]
@@ -499,7 +499,7 @@
 
             # Record metadata for the index
             self.files[filename] = (sectionstart, sectionlen)
-            node = hashlib.sha1(filename).digest()
+            node = hashutil.sha1(filename).digest()
             self.entries[node] = node
 
     def close(self, ledger=None):
@@ -517,7 +517,7 @@
         nodeindexlength = self.NODEINDEXENTRYLENGTH
 
         files = (
-            (hashlib.sha1(filename).digest(), filename, offset, size)
+            (hashutil.sha1(filename).digest(), filename, offset, size)
             for filename, (offset, size) in pycompat.iteritems(self.files)
         )
         files = sorted(files)
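
historypack's `_findsection` turns the leading bytes of the name hash into a
fanout key: unpack the first `fanoutprefix` bytes as an integer and use it to
index a table that narrows the on-disk binary search. In isolation, assuming
a one-byte prefix::

    import hashlib
    import struct

    fanoutstruct = '!B'                 # one-byte prefix: 256 buckets
    fanoutprefix = struct.calcsize(fanoutstruct)

    def fanoutkey(name):
        namehash = hashlib.sha1(name).digest()
        return struct.unpack(fanoutstruct, namehash[:fanoutprefix])[0]

    print(fanoutkey(b'path/to/file'))   # bucket index in [0, 255]
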
--- a/hgext/remotefilelog/metadatastore.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/metadatastore.py	Tue Jan 21 13:14:51 2020 -0500
@@ -12,12 +12,12 @@
         super(unionmetadatastore, self).__init__(*args, **kwargs)
 
         self.stores = args
-        self.writestore = kwargs.get(r'writestore')
+        self.writestore = kwargs.get('writestore')
 
         # If allowincomplete==True then the union store can return partial
         # ancestor lists, otherwise it will throw a KeyError if a full
         # history can't be found.
-        self.allowincomplete = kwargs.get(r'allowincomplete', False)
+        self.allowincomplete = kwargs.get('allowincomplete', False)
 
     def getancestors(self, name, node, known=None):
         """Returns as many ancestors as we're aware of.
--- a/hgext/remotefilelog/remotefilectx.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/remotefilectx.py	Tue Jan 21 13:14:51 2020 -0500
@@ -48,11 +48,11 @@
 
     @propertycache
     def _changeid(self):
-        if r'_changeid' in self.__dict__:
+        if '_changeid' in self.__dict__:
             return self._changeid
-        elif r'_changectx' in self.__dict__:
+        elif '_changectx' in self.__dict__:
             return self._changectx.rev()
-        elif r'_descendantrev' in self.__dict__:
+        elif '_descendantrev' in self.__dict__:
             # this file context was created from a revision with a known
             # descendant, we can (lazily) correct for linkrev aliases
             linknode = self._adjustlinknode(
@@ -83,7 +83,7 @@
 
         ancestormap = self.ancestormap()
         p1, p2, linknode, copyfrom = ancestormap[self._filenode]
-        rev = self._repo.changelog.nodemap.get(linknode)
+        rev = self._repo.changelog.index.get_rev(linknode)
         if rev is not None:
             return rev
 
@@ -119,7 +119,7 @@
         """
         lkr = self.linkrev()
         attrs = vars(self)
-        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
+        noctx = not ('_changeid' in attrs or r'_changectx' in attrs)
         if noctx or self.rev() == lkr:
             return lkr
         linknode = self._adjustlinknode(
@@ -246,11 +246,11 @@
             return linknode
 
         commonlogkwargs = {
-            r'revs': b' '.join([hex(cl.node(rev)) for rev in revs]),
-            r'fnode': hex(fnode),
-            r'filepath': path,
-            r'user': shallowutil.getusername(repo.ui),
-            r'reponame': shallowutil.getreponame(repo.ui),
+            'revs': b' '.join([hex(cl.node(rev)) for rev in revs]),
+            'fnode': hex(fnode),
+            'filepath': path,
+            'user': shallowutil.getusername(repo.ui),
+            'reponame': shallowutil.getreponame(repo.ui),
         }
 
         repo.ui.log(b'linkrevfixup', b'adjusting linknode\n', **commonlogkwargs)
@@ -439,7 +439,7 @@
 
     def annotate(self, *args, **kwargs):
         introctx = self
-        prefetchskip = kwargs.pop(r'prefetchskip', None)
+        prefetchskip = kwargs.pop('prefetchskip', None)
         if prefetchskip:
             # use introrev so prefetchskip can be accurately tested
             introrev = self.introrev()
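
The ``__dict__`` membership tests in `_changeid` work because
``@propertycache`` stores its computed value on the instance under the
property's name; on Python 3 those keys are native str, which is why the
``r''`` prefixes could go. A minimal non-data descriptor with the same
caching behaviour, as a sketch::

    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, type=None):
            value = self.func(obj)
            obj.__dict__[self.name] = value  # caches and shadows the descriptor
            return value

    class ctx(object):
        @propertycache
        def _changeid(self):
            return 42

    c = ctx()
    print('_changeid' in c.__dict__)  # False: not computed yet
    c._changeid
    print('_changeid' in c.__dict__)  # True: cached under a str key
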
--- a/hgext/remotefilelog/repack.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/repack.py	Tue Jan 21 13:14:51 2020 -0500
@@ -29,7 +29,7 @@
     shallowutil,
 )
 
-osutil = policy.importmod(r'osutil')
+osutil = policy.importmod('osutil')
 
 
 class RepackAlreadyRunning(error.Abort):
@@ -878,13 +878,13 @@
     """
 
     __slots__ = (
-        r'filename',
-        r'node',
-        r'datasource',
-        r'historysource',
-        r'datarepacked',
-        r'historyrepacked',
-        r'gced',
+        'filename',
+        'node',
+        'datasource',
+        'historysource',
+        'datarepacked',
+        'historyrepacked',
+        'gced',
     )
 
     def __init__(self, filename, node):
--- a/hgext/remotefilelog/shallowbundle.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/shallowbundle.py	Tue Jan 21 13:14:51 2020 -0500
@@ -153,7 +153,7 @@
     try:
         # if serving, only send files the clients has patterns for
         if source == b'serve':
-            bundlecaps = kwargs.get(r'bundlecaps')
+            bundlecaps = kwargs.get('bundlecaps')
             includepattern = None
             excludepattern = None
             for cap in bundlecaps or []:
--- a/hgext/remotefilelog/shallowutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/remotefilelog/shallowutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,7 +8,6 @@
 
 import collections
 import errno
-import hashlib
 import os
 import stat
 import struct
@@ -24,6 +23,7 @@
     util,
 )
 from mercurial.utils import (
+    hashutil,
     storageutil,
     stringutil,
 )
@@ -39,12 +39,12 @@
 
 
 def getcachekey(reponame, file, id):
-    pathhash = node.hex(hashlib.sha1(file).digest())
+    pathhash = node.hex(hashutil.sha1(file).digest())
     return os.path.join(reponame, pathhash[:2], pathhash[2:], id)
 
 
 def getlocalkey(file, id):
-    pathhash = node.hex(hashlib.sha1(file).digest())
+    pathhash = node.hex(hashutil.sha1(file).digest())
     return os.path.join(pathhash, id)
 
 
@@ -260,9 +260,9 @@
             # v0, str(int(size)) is the header
             size = int(header)
     except ValueError:
-        raise RuntimeError(r"unexpected remotefilelog header: illegal format")
+        raise RuntimeError("unexpected remotefilelog header: illegal format")
     if size is None:
-        raise RuntimeError(r"unexpected remotefilelog header: no size found")
+        raise RuntimeError("unexpected remotefilelog header: no size found")
     return index + 1, size, flags
 
 
--- a/hgext/schemes.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/schemes.py	Tue Jan 21 13:14:51 2020 -0500
@@ -63,7 +63,7 @@
 # leave the attribute unspecified.
 testedwith = b'ships-with-hg-core'
 
-_partre = re.compile(br'\{(\d+)\}')
+_partre = re.compile(br'{(\d+)\}')
 
 
 class ShortRepository(object):
--- a/hgext/share.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/share.py	Tue Jan 21 13:14:51 2020 -0500
@@ -122,7 +122,7 @@
     if pool:
         pool = util.expandpath(pool)
 
-    opts[r'shareopts'] = {
+    opts['shareopts'] = {
         b'pool': pool,
         b'mode': ui.config(b'share', b'poolnaming'),
     }
--- a/hgext/sparse.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/sparse.py	Tue Jan 21 13:14:51 2020 -0500
@@ -153,10 +153,10 @@
 
 
 def _clonesparsecmd(orig, ui, repo, *args, **opts):
-    include_pat = opts.get(r'include')
-    exclude_pat = opts.get(r'exclude')
-    enableprofile_pat = opts.get(r'enable_profile')
-    narrow_pat = opts.get(r'narrow')
+    include_pat = opts.get('include')
+    exclude_pat = opts.get('exclude')
+    enableprofile_pat = opts.get('enable_profile')
+    narrow_pat = opts.get('narrow')
     include = exclude = enableprofile = False
     if include_pat:
         pat = include_pat
@@ -209,7 +209,7 @@
     )
 
     def _add(orig, ui, repo, *pats, **opts):
-        if opts.get(r'sparse'):
+        if opts.get('sparse'):
             dirs = set()
             for pat in pats:
                 dirname, basename = util.split(pat)
--- a/hgext/split.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/split.py	Tue Jan 21 13:14:51 2020 -0500
@@ -22,11 +22,10 @@
     commands,
     error,
     hg,
-    obsolete,
-    phases,
     pycompat,
     registrar,
     revsetlang,
+    rewriteutil,
     scmutil,
 )
 
@@ -77,45 +76,26 @@
 
         rev = revs.first()
         ctx = repo[rev]
+        # Handle nullid specially here (instead of leaving for precheck()
+        # below) so we get a nicer message and error code.
         if rev is None or ctx.node() == nullid:
             ui.status(_(b'nothing to split\n'))
             return 1
         if ctx.node() is None:
             raise error.Abort(_(b'cannot split working directory'))
 
-        # rewriteutil.precheck is not very useful here because:
-        # 1. null check is done above and it's more friendly to return 1
-        #    instead of abort
-        # 2. mergestate check is done below by cmdutil.bailifchanged
-        # 3. unstable check is more complex here because of --rebase
-        #
-        # So only "public" check is useful and it's checked directly here.
-        if ctx.phase() == phases.public:
-            raise error.Abort(
-                _(b'cannot split public changeset'),
-                hint=_(b"see 'hg help phases' for details"),
-            )
-
-        descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
-        alloworphaned = obsolete.isenabled(repo, obsolete.allowunstableopt)
         if opts.get(b'rebase'):
             # Skip obsoleted descendants and their descendants so the rebase
             # won't cause conflicts for sure.
+            descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
             torebase = list(
                 repo.revs(
                     b'%ld - (%ld & obsolete())::', descendants, descendants
                 )
             )
-            if not alloworphaned and len(torebase) != len(descendants):
-                raise error.Abort(
-                    _(b'split would leave orphaned changesets behind')
-                )
         else:
-            if not alloworphaned and descendants:
-                raise error.Abort(
-                    _(b'cannot split changeset with children without rebase')
-                )
-            torebase = ()
+            torebase = []
+        rewriteutil.precheck(repo, [rev] + torebase, b'split')
 
         if len(ctx.parents()) > 1:
             raise error.Abort(_(b'cannot split a merge changeset'))
@@ -152,7 +132,9 @@
         scmutil.movedirstate(repo, ctx.p1())
 
     # Any modified, added, removed, deleted result means split is incomplete
-    incomplete = lambda repo: any(repo.status()[:4])
+    def incomplete(repo):
+        st = repo.status()
+        return any((st.modified, st.added, st.removed, st.deleted))
 
     # Main split loop
     while incomplete(repo):
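
repo.status() returns a status object that is both tuple-like and attribute-addressable; the rewrite above replaces the opaque [:4] slice and the assigned lambda with a named function over named fields. A rough equivalent with a stand-in namedtuple (assumption: only the field names the diff itself uses)::

    from collections import namedtuple

    # stand-in for scmutil.status; the real object also slices like a tuple
    Status = namedtuple(
        'Status', 'modified added removed deleted unknown ignored clean'
    )

    def incomplete(st):
        # same truth value as any(st[:4]), but self-documenting
        return any((st.modified, st.added, st.removed, st.deleted))

    st = Status(['a.txt'], [], [], [], [], [], [])
    assert incomplete(st) and any(st[:4])
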
--- a/hgext/sqlitestore.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/sqlitestore.py	Tue Jan 21 13:14:51 2020 -0500
@@ -45,7 +45,6 @@
 
 from __future__ import absolute_import
 
-import hashlib
 import sqlite3
 import struct
 import threading
@@ -75,7 +74,10 @@
     repository,
     util as interfaceutil,
 )
-from mercurial.utils import storageutil
+from mercurial.utils import (
+    hashutil,
+    storageutil,
+)
 
 try:
     from mercurial import zstd
@@ -121,19 +123,19 @@
     # Deltas are stored as content-indexed blobs.
     # compression column holds COMPRESSION_* constant for how the
     # delta is encoded.
-    r'CREATE TABLE delta ('
-    r'    id INTEGER PRIMARY KEY, '
-    r'    compression INTEGER NOT NULL, '
-    r'    hash BLOB UNIQUE ON CONFLICT ABORT, '
-    r'    delta BLOB NOT NULL '
-    r')',
+    'CREATE TABLE delta ('
+    '    id INTEGER PRIMARY KEY, '
+    '    compression INTEGER NOT NULL, '
+    '    hash BLOB UNIQUE ON CONFLICT ABORT, '
+    '    delta BLOB NOT NULL '
+    ')',
     # Tracked paths are denormalized to integers to avoid redundant
     # storage of the path name.
-    r'CREATE TABLE filepath ('
-    r'    id INTEGER PRIMARY KEY, '
-    r'    path BLOB NOT NULL '
-    r')',
-    r'CREATE UNIQUE INDEX filepath_path ' r'    ON filepath (path)',
+    'CREATE TABLE filepath ('
+    '    id INTEGER PRIMARY KEY, '
+    '    path BLOB NOT NULL '
+    ')',
+    'CREATE UNIQUE INDEX filepath_path ON filepath (path)',
     # We have a single table for all file revision data.
     # Each file revision is uniquely described by a (path, rev) and
     # (path, node).
@@ -145,39 +147,38 @@
     #
     # flags column holds bitwise integer flags controlling storage options.
     # These flags are defined by the FLAG_* constants.
-    r'CREATE TABLE fileindex ('
-    r'    id INTEGER PRIMARY KEY, '
-    r'    pathid INTEGER REFERENCES filepath(id), '
-    r'    revnum INTEGER NOT NULL, '
-    r'    p1rev INTEGER NOT NULL, '
-    r'    p2rev INTEGER NOT NULL, '
-    r'    linkrev INTEGER NOT NULL, '
-    r'    flags INTEGER NOT NULL, '
-    r'    deltaid INTEGER REFERENCES delta(id), '
-    r'    deltabaseid INTEGER REFERENCES fileindex(id), '
-    r'    node BLOB NOT NULL '
-    r')',
-    r'CREATE UNIQUE INDEX fileindex_pathrevnum '
-    r'    ON fileindex (pathid, revnum)',
-    r'CREATE UNIQUE INDEX fileindex_pathnode '
-    r'    ON fileindex (pathid, node)',
+    'CREATE TABLE fileindex ('
+    '    id INTEGER PRIMARY KEY, '
+    '    pathid INTEGER REFERENCES filepath(id), '
+    '    revnum INTEGER NOT NULL, '
+    '    p1rev INTEGER NOT NULL, '
+    '    p2rev INTEGER NOT NULL, '
+    '    linkrev INTEGER NOT NULL, '
+    '    flags INTEGER NOT NULL, '
+    '    deltaid INTEGER REFERENCES delta(id), '
+    '    deltabaseid INTEGER REFERENCES fileindex(id), '
+    '    node BLOB NOT NULL '
+    ')',
+    'CREATE UNIQUE INDEX fileindex_pathrevnum '
+    '    ON fileindex (pathid, revnum)',
+    'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)',
     # Provide a view over all file data for convenience.
-    r'CREATE VIEW filedata AS '
-    r'SELECT '
-    r'    fileindex.id AS id, '
-    r'    filepath.id AS pathid, '
-    r'    filepath.path AS path, '
-    r'    fileindex.revnum AS revnum, '
-    r'    fileindex.node AS node, '
-    r'    fileindex.p1rev AS p1rev, '
-    r'    fileindex.p2rev AS p2rev, '
-    r'    fileindex.linkrev AS linkrev, '
-    r'    fileindex.flags AS flags, '
-    r'    fileindex.deltaid AS deltaid, '
-    r'    fileindex.deltabaseid AS deltabaseid '
-    r'FROM filepath, fileindex '
-    r'WHERE fileindex.pathid=filepath.id',
-    r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
+    'CREATE VIEW filedata AS '
+    'SELECT '
+    '    fileindex.id AS id, '
+    '    filepath.id AS pathid, '
+    '    filepath.path AS path, '
+    '    fileindex.revnum AS revnum, '
+    '    fileindex.node AS node, '
+    '    fileindex.p1rev AS p1rev, '
+    '    fileindex.p2rev AS p2rev, '
+    '    fileindex.linkrev AS linkrev, '
+    '    fileindex.flags AS flags, '
+    '    fileindex.deltaid AS deltaid, '
+    '    fileindex.deltabaseid AS deltabaseid '
+    'FROM filepath, fileindex '
+    'WHERE fileindex.pathid=filepath.id',
+    'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
 ]
 
 
@@ -190,22 +191,22 @@
     # baseid "poisoned" to null and limited the recursive filter to
     # "is not null".
     res = db.execute(
-        r'WITH RECURSIVE '
-        r'    deltachain(deltaid, baseid) AS ('
-        r'        SELECT deltaid, deltabaseid FROM fileindex '
-        r'            WHERE pathid=? AND node=? '
-        r'        UNION ALL '
-        r'        SELECT fileindex.deltaid, deltabaseid '
-        r'            FROM fileindex, deltachain '
-        r'            WHERE '
-        r'                fileindex.id=deltachain.baseid '
-        r'                AND deltachain.baseid IS NOT NULL '
-        r'                AND fileindex.id NOT IN ({stops}) '
-        r'    ) '
-        r'SELECT deltachain.baseid, compression, delta '
-        r'FROM deltachain, delta '
-        r'WHERE delta.id=deltachain.deltaid'.format(
-            stops=r','.join([r'?'] * len(stoprids))
+        'WITH RECURSIVE '
+        '    deltachain(deltaid, baseid) AS ('
+        '        SELECT deltaid, deltabaseid FROM fileindex '
+        '            WHERE pathid=? AND node=? '
+        '        UNION ALL '
+        '        SELECT fileindex.deltaid, deltabaseid '
+        '            FROM fileindex, deltachain '
+        '            WHERE '
+        '                fileindex.id=deltachain.baseid '
+        '                AND deltachain.baseid IS NOT NULL '
+        '                AND fileindex.id NOT IN ({stops}) '
+        '    ) '
+        'SELECT deltachain.baseid, compression, delta '
+        'FROM deltachain, delta '
+        'WHERE delta.id=deltachain.deltaid'.format(
+            stops=','.join(['?'] * len(stoprids))
         ),
         tuple([pathid, node] + list(stoprids.keys())),
     )
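
The query above walks an entire delta chain with a single recursive CTE, stopping at bases whose fulltext is already cached. A toy version of the same WITH RECURSIVE pattern on a simplified schema, runnable as-is with the standard sqlite3 module::

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute(
        'CREATE TABLE delta (id INTEGER PRIMARY KEY, baseid INTEGER, data TEXT)'
    )
    # a three-link chain: delta 3 applies on 2, which applies on 1 (a full text)
    db.executemany('INSERT INTO delta VALUES (?, ?, ?)',
                   [(1, None, 'full'), (2, 1, 'd1'), (3, 2, 'd2')])

    stops = {1}  # revisions whose fulltext is already cached
    res = db.execute(
        'WITH RECURSIVE '
        '    chain(id, baseid) AS ('
        '        SELECT id, baseid FROM delta WHERE id=? '
        '        UNION ALL '
        '        SELECT delta.id, delta.baseid FROM delta, chain '
        '            WHERE delta.id=chain.baseid '
        '                AND chain.baseid IS NOT NULL '
        '                AND delta.id NOT IN ({stops}) '
        '    ) '
        'SELECT chain.id, data FROM chain, delta '
        'WHERE delta.id=chain.id'.format(stops=','.join(['?'] * len(stops))),
        tuple([3] + list(stops)),
    )
    # the deltas needed to rebuild revision 3 down to the cached base
    print(res.fetchall())
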
@@ -249,13 +250,12 @@
 def insertdelta(db, compression, hash, delta):
     try:
         return db.execute(
-            r'INSERT INTO delta (compression, hash, delta) '
-            r'VALUES (?, ?, ?)',
+            'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)',
             (compression, hash, delta),
         ).lastrowid
     except sqlite3.IntegrityError:
         return db.execute(
-            r'SELECT id FROM delta WHERE hash=?', (hash,)
+            'SELECT id FROM delta WHERE hash=?', (hash,)
         ).fetchone()[0]
 
 
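insertdelta above implements content-addressed de-duplication: optimistically INSERT, and on a UNIQUE-constraint violation fall back to looking up the existing row. The same pattern in isolation::

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute('CREATE TABLE delta (id INTEGER PRIMARY KEY, '
               'hash BLOB UNIQUE ON CONFLICT ABORT, delta BLOB NOT NULL)')

    def insertdelta(db, hash, delta):
        try:
            return db.execute(
                'INSERT INTO delta (hash, delta) VALUES (?, ?)', (hash, delta)
            ).lastrowid
        except sqlite3.IntegrityError:
            # content already stored: reuse the existing row
            return db.execute(
                'SELECT id FROM delta WHERE hash=?', (hash,)
            ).fetchone()[0]

    assert insertdelta(db, b'h1', b'blob') == insertdelta(db, b'h1', b'blob')
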
@@ -335,7 +335,7 @@
 
         res = list(
             self._db.execute(
-                r'SELECT id FROM filepath WHERE path=?', (self._path,)
+                'SELECT id FROM filepath WHERE path=?', (self._path,)
             )
         )
 
@@ -346,10 +346,10 @@
         self._pathid = res[0][0]
 
         res = self._db.execute(
-            r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
-            r'FROM fileindex '
-            r'WHERE pathid=? '
-            r'ORDER BY revnum ASC',
+            'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
+            'FROM fileindex '
+            'WHERE pathid=? '
+            'ORDER BY revnum ASC',
             (self._pathid,),
         )
 
@@ -496,11 +496,11 @@
         rev = self.rev(node)
 
         res = self._db.execute(
-            r'SELECT'
-            r'  node '
-            r'  FROM filedata '
-            r'  WHERE path=? AND (p1rev=? OR p2rev=?) '
-            r'  ORDER BY revnum ASC',
+            'SELECT'
+            '  node '
+            '  FROM filedata '
+            '  WHERE path=? AND (p1rev=? OR p2rev=?) '
+            '  ORDER BY revnum ASC',
             (self._path, rev, rev),
         )
 
@@ -598,9 +598,9 @@
 
         # TODO perform in a single query.
         res = self._db.execute(
-            r'SELECT revnum, deltaid FROM fileindex '
-            r'WHERE pathid=? '
-            r'    AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
+            'SELECT revnum, deltaid FROM fileindex '
+            'WHERE pathid=? '
+            '    AND node in (%s)' % (','.join(['?'] * len(nodes))),
             tuple([self._pathid] + nodes),
         )
 
@@ -608,7 +608,7 @@
 
         for rev, deltaid in res:
             res = self._db.execute(
-                r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
+                'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                 (self._pathid, deltaid),
             )
             deltabases[rev] = res.fetchone()[0]
@@ -726,7 +726,7 @@
                     entry.flags &= ~FLAG_MISSING_P1
 
                     self._db.execute(
-                        r'UPDATE fileindex SET p1rev=?, flags=? ' r'WHERE id=?',
+                        'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?',
                         (self._nodetorev[p1], entry.flags, entry.rid),
                     )
 
@@ -736,7 +736,7 @@
                     entry.flags &= ~FLAG_MISSING_P2
 
                     self._db.execute(
-                        r'UPDATE fileindex SET p2rev=?, flags=? ' r'WHERE id=?',
+                        'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?',
                         (self._nodetorev[p1], entry.flags, entry.rid),
                     )
 
@@ -787,7 +787,7 @@
 
         # Find the delta to be censored.
         censoreddeltaid = self._db.execute(
-            r'SELECT deltaid FROM fileindex WHERE id=?',
+            'SELECT deltaid FROM fileindex WHERE id=?',
             (self._revisions[censornode].rid,),
         ).fetchone()[0]
 
@@ -796,8 +796,8 @@
         # for those delta chains too.
         rows = list(
             self._db.execute(
-                r'SELECT id, pathid, node FROM fileindex '
-                r'WHERE deltabaseid=? OR deltaid=?',
+                'SELECT id, pathid, node FROM fileindex '
+                'WHERE deltabaseid=? OR deltaid=?',
                 (censoreddeltaid, censoreddeltaid),
             )
         )
@@ -809,7 +809,7 @@
                 self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
             )
 
-            deltahash = hashlib.sha1(fulltext).digest()
+            deltahash = hashutil.sha1(fulltext).digest()
 
             if self._compengine == b'zstd':
                 deltablob = self._cctx.compress(fulltext)
@@ -832,14 +832,14 @@
             deltaid = insertdelta(self._db, compression, deltahash, deltablob)
 
             self._db.execute(
-                r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
-                r'WHERE id=?',
+                'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
+                'WHERE id=?',
                 (deltaid, rid),
             )
 
         # Now create the tombstone delta and replace the delta on the censored
         # node.
-        deltahash = hashlib.sha1(tombstone).digest()
+        deltahash = hashutil.sha1(tombstone).digest()
         tombstonedeltaid = insertdelta(
             self._db, COMPRESSION_NONE, deltahash, tombstone
         )
@@ -848,12 +848,12 @@
         flags |= FLAG_CENSORED
 
         self._db.execute(
-            r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
-            r'WHERE pathid=? AND node=?',
+            'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
+            'WHERE pathid=? AND node=?',
             (flags, tombstonedeltaid, self._pathid, censornode),
         )
 
-        self._db.execute(r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
+        self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))
 
         self._refreshindex()
         self._revisioncache.clear()
@@ -878,7 +878,7 @@
 
         for rev in self.revs(rev):
             self._db.execute(
-                r'DELETE FROM fileindex WHERE pathid=? AND node=?',
+                'DELETE FROM fileindex WHERE pathid=? AND node=?',
                 (self._pathid, self.node(rev)),
             )
 
@@ -971,7 +971,7 @@
     ):
         if self._pathid is None:
             res = self._db.execute(
-                r'INSERT INTO filepath (path) VALUES (?)', (self._path,)
+                'INSERT INTO filepath (path) VALUES (?)', (self._path,)
             )
             self._pathid = res.lastrowid
 
@@ -1006,7 +1006,7 @@
         # us to de-duplicate. The table is configured to ignore conflicts
         # and it is faster to just insert and silently noop than to look
         # first.
-        deltahash = hashlib.sha1(delta).digest()
+        deltahash = hashutil.sha1(delta).digest()
 
         if self._compengine == b'zstd':
             deltablob = self._cctx.compress(delta)
@@ -1042,10 +1042,10 @@
             p2rev = self._nodetorev[p2]
 
         rid = self._db.execute(
-            r'INSERT INTO fileindex ('
-            r'    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
-            r'    deltaid, deltabaseid) '
-            r'    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
+            'INSERT INTO fileindex ('
+            '    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
+            '    deltaid, deltabaseid) '
+            '    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
             (
                 self._pathid,
                 rev,
@@ -1090,7 +1090,7 @@
         if current:
             return tr
 
-        self._dbconn.execute(r'BEGIN TRANSACTION')
+        self._dbconn.execute('BEGIN TRANSACTION')
 
         def committransaction(_):
             self._dbconn.commit()
@@ -1122,7 +1122,7 @@
     db = sqlite3.connect(encoding.strfromlocal(path))
     db.text_factory = bytes
 
-    res = db.execute(r'PRAGMA user_version').fetchone()[0]
+    res = db.execute('PRAGMA user_version').fetchone()[0]
 
     # New database.
     if res == 0:
@@ -1137,7 +1137,7 @@
     else:
         raise error.Abort(_(b'sqlite database has unrecognized version'))
 
-    db.execute(r'PRAGMA journal_mode=WAL')
+    db.execute('PRAGMA journal_mode=WAL')
 
     return db
 
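The database-opening helper these last hunks touch keys schema migration off SQLite's user_version pragma and switches to WAL journaling for better reader/writer concurrency. The skeleton of that approach::

    import sqlite3

    db = sqlite3.connect(':memory:')
    version = db.execute('PRAGMA user_version').fetchone()[0]
    if version == 0:
        # brand-new database: create the schema, then stamp the version
        db.execute('CREATE TABLE kv (k TEXT PRIMARY KEY, v TEXT)')
        db.execute('PRAGMA user_version=1')
    elif version != 1:
        raise RuntimeError('sqlite database has unrecognized version')

    db.execute('PRAGMA journal_mode=WAL')  # no-op for :memory:, WAL on disk
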
--- a/hgext/transplant.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/transplant.py	Tue Jan 21 13:14:51 2020 -0500
@@ -235,7 +235,7 @@
                     patchfile = None
                 else:
                     fd, patchfile = pycompat.mkstemp(prefix=b'hg-transplant-')
-                    fp = os.fdopen(fd, r'wb')
+                    fp = os.fdopen(fd, 'wb')
                     gen = patch.diff(source, parent, node, opts=diffopts)
                     for chunk in gen:
                         fp.write(chunk)
@@ -290,7 +290,7 @@
         self.ui.status(_(b'filtering %s\n') % patchfile)
         user, date, msg = (changelog[1], changelog[2], changelog[4])
         fd, headerfile = pycompat.mkstemp(prefix=b'hg-transplant-')
-        fp = os.fdopen(fd, r'wb')
+        fp = os.fdopen(fd, 'wb')
         fp.write(b"# HG changeset patch\n")
         fp.write(b"# User %s\n" % user)
         fp.write(b"# Date %d %d\n" % date)
@@ -443,7 +443,13 @@
                 )
             if merge:
                 repo.setparents(p1, parents[1])
-            modified, added, removed, deleted = repo.status()[:4]
+            st = repo.status()
+            modified, added, removed, deleted = (
+                st.modified,
+                st.added,
+                st.removed,
+                st.deleted,
+            )
             if merge or modified or added or removed or deleted:
                 n = repo.commit(
                     message,
@@ -754,22 +760,14 @@
 
     def checkopts(opts, revs):
         if opts.get(b'continue'):
-            if opts.get(b'branch') or opts.get(b'all') or opts.get(b'merge'):
-                raise error.Abort(
-                    _(
-                        b'--continue is incompatible with '
-                        b'--branch, --all and --merge'
-                    )
-                )
+            cmdutil.check_incompatible_arguments(
+                opts, b'continue', b'branch', b'all', b'merge'
+            )
             return
         if opts.get(b'stop'):
-            if opts.get(b'branch') or opts.get(b'all') or opts.get(b'merge'):
-                raise error.Abort(
-                    _(
-                        b'--stop is incompatible with '
-                        b'--branch, --all and --merge'
-                    )
-                )
+            cmdutil.check_incompatible_arguments(
+                opts, b'stop', b'branch', b'all', b'merge'
+            )
             return
         if not (
             opts.get(b'source')
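
cmdutil.check_incompatible_arguments replaces the hand-rolled Abort messages removed above. Its implementation is not part of this diff; a plausible sketch matching the call sites, with hypothetical message text::

    class Abort(Exception):  # stand-in for mercurial.error.Abort
        pass

    def check_incompatible_arguments(opts, first, *others):
        # abort if `first` was specified together with any of `others`
        if opts.get(first):
            for other in others:
                if opts.get(other):
                    raise Abort('cannot specify both --%s and --%s'
                                % (first.decode(), other.decode()))

    check_incompatible_arguments(
        {b'continue': True}, b'continue', b'branch', b'all', b'merge'
    )
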
--- a/hgext/uncommit.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/uncommit.py	Tue Jan 21 13:14:51 2020 -0500
@@ -29,11 +29,11 @@
     error,
     node,
     obsutil,
+    pathutil,
     pycompat,
     registrar,
     rewriteutil,
     scmutil,
-    util,
 )
 
 cmdtable = {}
@@ -157,7 +157,8 @@
 
     with repo.wlock(), repo.lock():
 
-        m, a, r, d = repo.status()[:4]
+        st = repo.status()
+        m, a, r, d = st.modified, st.added, st.removed, st.deleted
         isdirtypath = any(set(m + a + r + d) & set(pats))
         allowdirtywcopy = opts[
             b'allow_dirty_working_copy'
@@ -185,7 +186,7 @@
             # if not everything tracked in that directory can be
             # uncommitted.
             if badfiles:
-                badfiles -= {f for f in util.dirs(eligible)}
+                badfiles -= {f for f in pathutil.dirs(eligible)}
 
             for f in sorted(badfiles):
                 if f in s.clean:
--- a/hgext/zeroconf/Zeroconf.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/zeroconf/Zeroconf.py	Tue Jan 21 13:14:51 2020 -0500
@@ -684,7 +684,7 @@
                 break
             t = len & 0xC0
             if t == 0x00:
-                result = r''.join((result, self.readUTF(off, len) + r'.'))
+                result = ''.join((result, self.readUTF(off, len) + '.'))
                 off += len
             elif t == 0xC0:
                 if next < 0:
@@ -1429,7 +1429,7 @@
         self.socket.setsockopt(
             socket.SOL_IP,
             socket.IP_ADD_MEMBERSHIP,
-            socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(r'0.0.0.0'),
+            socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'),
         )
 
         self.listeners = []
@@ -1845,7 +1845,7 @@
             self.socket.setsockopt(
                 socket.SOL_IP,
                 socket.IP_DROP_MEMBERSHIP,
-                socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(r'0.0.0.0'),
+                socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'),
             )
             self.socket.close()
 
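The inet_aton(_MDNS_ADDR) + inet_aton('0.0.0.0') expressions above build the packed ip_mreq value that IP_ADD_MEMBERSHIP expects: the multicast group address followed by the local interface address. The same dance in isolation (IPPROTO_IP is the portable spelling of the SOL_IP used above)::

    import socket

    _MDNS_ADDR = '224.0.0.251'  # the mDNS group used by Zeroconf.py
    _MDNS_PORT = 5353

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('', _MDNS_PORT))
    # packed group address + packed interface ('0.0.0.0' = default interface)
    mreq = socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0')
    s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    try:
        pass  # receive/send mDNS datagrams here
    finally:
        s.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, mreq)
        s.close()
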
--- a/hgext/zeroconf/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgext/zeroconf/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -6,9 +6,9 @@
 # GNU General Public License version 2 or any later version.
 '''discover and advertise repositories on the local network
 
-Zeroconf-enabled repositories will be announced in a network without
-the need to configure a server or a service. They can be discovered
-without knowing their actual IP address.
+The zeroconf extension will advertise :hg:`serve` instances over
+DNS-SD so that they can be discovered using the :hg:`paths` command
+without knowing the server's IP address.
 
 To allow other people to discover your repository, run
 :hg:`serve` in your repository::
@@ -55,7 +55,7 @@
     # finds external-facing interface without sending any packets (Linux)
     try:
         s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-        s.connect((r'1.0.0.1', 0))
+        s.connect(('1.0.0.1', 0))
         ip = s.getsockname()[0]
         return ip
     except socket.error:
@@ -64,17 +64,17 @@
     # Generic method, sometimes gives useless results
     try:
         dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
-        if r':' in dumbip:
-            dumbip = r'127.0.0.1'
-        if not dumbip.startswith(r'127.'):
+        if ':' in dumbip:
+            dumbip = '127.0.0.1'
+        if not dumbip.startswith('127.'):
             return dumbip
     except (socket.gaierror, socket.herror):
-        dumbip = r'127.0.0.1'
+        dumbip = '127.0.0.1'
 
     # works elsewhere, but actually sends a packet
     try:
         s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-        s.connect((r'1.0.0.1', 1))
+        s.connect(('1.0.0.1', 1))
         ip = s.getsockname()[0]
         return ip
     except socket.error:
@@ -87,20 +87,20 @@
     global server, localip
     if not server:
         ip = getip()
-        if ip.startswith(r'127.'):
+        if ip.startswith('127.'):
             # if we have no internet connection, this can happen.
             return
         localip = socket.inet_aton(ip)
         server = Zeroconf.Zeroconf(ip)
 
-    hostname = socket.gethostname().split(r'.')[0]
-    host = hostname + r".local"
-    name = r"%s-%s" % (hostname, name)
+    hostname = socket.gethostname().split('.')[0]
+    host = hostname + ".local"
+    name = "%s-%s" % (hostname, name)
 
     # advertise to browsers
     svc = Zeroconf.ServiceInfo(
         b'_http._tcp.local.',
-        pycompat.bytestr(name + r'._http._tcp.local.'),
+        pycompat.bytestr(name + '._http._tcp.local.'),
         server=host,
         port=port,
         properties={b'description': desc, b'path': b"/" + path},
@@ -113,7 +113,7 @@
     # advertise to Mercurial clients
     svc = Zeroconf.ServiceInfo(
         b'_hg._tcp.local.',
-        pycompat.bytestr(name + r'._hg._tcp.local.'),
+        pycompat.bytestr(name + '._hg._tcp.local.'),
         server=host,
         port=port,
         properties={b'description': desc, b'path': b"/" + path},
@@ -171,7 +171,7 @@
 
 def getzcpaths():
     ip = getip()
-    if ip.startswith(r'127.'):
+    if ip.startswith('127.'):
         return
     server = Zeroconf.Zeroconf(ip)
     l = listener()
@@ -180,10 +180,10 @@
     server.close()
     for value in l.found.values():
         name = value.name[: value.name.index(b'.')]
-        url = r"http://%s:%s%s" % (
+        url = "http://%s:%s%s" % (
             socket.inet_ntoa(value.address),
             value.port,
-            value.properties.get(r"path", r"/"),
+            value.properties.get("path", "/"),
         )
         yield b"zc-" + name, pycompat.bytestr(url)
 
--- a/hgweb.cgi	Thu Jan 09 14:19:20 2020 -0500
+++ b/hgweb.cgi	Tue Jan 21 13:14:51 2020 -0500
@@ -8,12 +8,15 @@
 
 # Uncomment and adjust if Mercurial is not installed system-wide
 # (consult "installed modules" path from 'hg debuginstall'):
-#import sys; sys.path.insert(0, "/path/to/python/lib")
+# import sys; sys.path.insert(0, "/path/to/python/lib")
 
 # Uncomment to send python tracebacks to the browser if an error occurs:
-#import cgitb; cgitb.enable()
+# import cgitb; cgitb.enable()
+
+from mercurial import demandimport
 
-from mercurial import demandimport; demandimport.enable()
+demandimport.enable()
 from mercurial.hgweb import hgweb, wsgicgi
+
 application = hgweb(config)
 wsgicgi.launch(application)
--- a/i18n/hggettext	Thu Jan 09 14:19:20 2020 -0500
+++ b/i18n/hggettext	Tue Jan 21 13:14:51 2020 -0500
@@ -57,18 +57,22 @@
 
 
 def poentry(path, lineno, s):
-    return ('#: %s:%d\n' % (path, lineno) +
-            'msgid %s\n' % normalize(s) +
-            'msgstr ""\n')
+    return (
+        '#: %s:%d\n' % (path, lineno)
+        + 'msgid %s\n' % normalize(s)
+        + 'msgstr ""\n'
+    )
+
 
 doctestre = re.compile(r'^ +>>> ', re.MULTILINE)
 
+
 def offset(src, doc, name, lineno, default):
     """Compute offset or issue a warning on stdout."""
     # remove doctest part, in order to avoid backslash mismatching
     m = doctestre.search(doc)
     if m:
-        doc = doc[:m.start()]
+        doc = doc[: m.start()]
 
     # Backslashes in doc appear doubled in src.
     end = src.find(doc.replace('\\', '\\\\'))
@@ -76,9 +80,11 @@
         # This can happen if the docstring contains unnecessary escape
         # sequences such as \" in a triple-quoted string. The problem
         # is that \" is turned into " and so doc wont appear in src.
-        sys.stderr.write("%s:%d:warning:"
-                         " unknown docstr offset, assuming %d lines\n"
-                         % (name, lineno, default))
+        sys.stderr.write(
+            "%s:%d:warning:"
+            " unknown docstr offset, assuming %d lines\n"
+            % (name, lineno, default)
+        )
         return default
     else:
         return src.count('\n', 0, end)
@@ -121,7 +127,7 @@
 
     for func, rstrip in functions:
         if func.__doc__:
-            docobj = func # this might be a proxy to provide formatted doc
+            docobj = func  # this might be a proxy to provide formatted doc
             func = getattr(func, '_origfunc', func)
             funcmod = inspect.getmodule(func)
             extra = ''
@@ -155,7 +161,9 @@
     # accidentally import and extract strings from a Mercurial
     # installation mentioned in PYTHONPATH.
     sys.path.insert(0, os.getcwd())
-    from mercurial import demandimport; demandimport.enable()
+    from mercurial import demandimport
+
+    demandimport.enable()
     for path in sys.argv[1:]:
         if path.endswith('.txt'):
             rawtext(path)
--- a/i18n/polib.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/i18n/polib.py	Tue Jan 21 13:14:51 2020 -0500
@@ -551,18 +551,6 @@
         offsets = []
         entries = self.translated_entries()
 
-        # the keys are sorted in the .mo file
-        def cmp(_self, other):
-            # msgfmt compares entries with msgctxt if it exists
-            self_msgid = _self.msgctxt and _self.msgctxt or _self.msgid
-            other_msgid = other.msgctxt and other.msgctxt or other.msgid
-            if self_msgid > other_msgid:
-                return 1
-            elif self_msgid < other_msgid:
-                return -1
-            else:
-                return 0
-
         # add metadata entry
         entries.sort(key=lambda o: o.msgctxt or o.msgid)
         mentry = self.metadata_as_entry()
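
The dead cmp() helper deleted above was a Python 2 three-way comparator; the sort already uses the key= form, the only style Python 3 supports. How the two relate::

    import functools

    entries = [{'msgctxt': None, 'msgid': 'b'}, {'msgctxt': 'a', 'msgid': 'z'}]

    def compare(x, y):  # old-style cmp: negative/zero/positive
        kx = x['msgctxt'] or x['msgid']
        ky = y['msgctxt'] or y['msgid']
        return (kx > ky) - (kx < ky)

    # the key function polib now relies on
    bykey = sorted(entries, key=lambda o: o['msgctxt'] or o['msgid'])
    # an old cmp function can still be adapted when needed
    bycmp = sorted(entries, key=functools.cmp_to_key(compare))
    assert bykey == bycmp
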
--- a/i18n/posplit	Thu Jan 09 14:19:20 2020 -0500
+++ b/i18n/posplit	Tue Jan 21 13:14:51 2020 -0500
@@ -11,6 +11,7 @@
 import re
 import sys
 
+
 def addentry(po, entry, cache):
     e = cache.get(entry.msgid)
     if e:
@@ -27,6 +28,7 @@
         po.append(entry)
         cache[entry.msgid] = entry
 
+
 def mkentry(orig, delta, msgid, msgstr):
     entry = polib.POEntry()
     entry.merge(orig)
@@ -35,13 +37,14 @@
     entry.occurrences = [(p, int(l) + delta) for (p, l) in orig.occurrences]
     return entry
 
+
 if __name__ == "__main__":
     po = polib.pofile(sys.argv[1])
 
     cache = {}
     entries = po[:]
     po[:] = []
-    findd = re.compile(r' *\.\. (\w+)::') # for finding directives
+    findd = re.compile(r' *\.\. (\w+)::')  # for finding directives
     for entry in entries:
         msgids = entry.msgid.split(u'\n\n')
         if entry.msgstr:
@@ -65,7 +68,7 @@
                 newentry = mkentry(entry, delta, msgid, msgstr)
                 mdirective = findd.match(msgid)
                 if mdirective:
-                    if not msgid[mdirective.end():].rstrip():
+                    if not msgid[mdirective.end() :].rstrip():
                         # only directive, nothing to translate here
                         delta += 2
                         continue
@@ -77,8 +80,10 @@
                             continue
                         else:
                             # lines following directly, unexpected
-                            print('Warning: text follows line with directive'
-                                  ' %s' % directive)
+                            print(
+                                'Warning: text follows line with directive'
+                                ' %s' % directive
+                            )
                     comment = 'do not translate: .. %s::' % directive
                     if not newentry.comment:
                         newentry.comment = comment
--- a/mercurial/ancestor.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/ancestor.py	Tue Jan 21 13:14:51 2020 -0500
@@ -16,7 +16,7 @@
     pycompat,
 )
 
-parsers = policy.importmod(r'parsers')
+parsers = policy.importmod('parsers')
 
 
 def commonancestorsheads(pfunc, *nodes):
@@ -108,12 +108,12 @@
                 if p == nullrev:
                     continue
                 dp = depth[p]
-                nsp = sp = seen[p]
+                sp = seen[p]
                 if dp <= dv:
                     depth[p] = dv + 1
                     if sp != sv:
                         interesting[sv] += 1
-                        nsp = seen[p] = sv
+                        seen[p] = sv
                         if sp:
                             interesting[sp] -= 1
                             if interesting[sp] == 0:
@@ -331,7 +331,7 @@
 
         Result does not include the null revision."""
         self._parentrevs = pfunc
-        self._initrevs = revs = [r for r in revs if r >= stoprev]
+        self._initrevs = [r for r in revs if r >= stoprev]
         self._stoprev = stoprev
         self._inclusive = inclusive
 
--- a/mercurial/archival.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/archival.py	Tue Jan 21 13:14:51 2020 -0500
@@ -138,8 +138,8 @@
     class GzipFileWithTime(gzip.GzipFile):
         def __init__(self, *args, **kw):
             timestamp = None
-            if r'timestamp' in kw:
-                timestamp = kw.pop(r'timestamp')
+            if 'timestamp' in kw:
+                timestamp = kw.pop('timestamp')
             if timestamp is None:
                 self.timestamp = time.time()
             else:
@@ -154,9 +154,11 @@
                 fname = fname[:-3]
             flags = 0
             if fname:
-                flags = gzip.FNAME
+                flags = gzip.FNAME  # pytype: disable=module-attr
             self.fileobj.write(pycompat.bytechr(flags))
-            gzip.write32u(self.fileobj, int(self.timestamp))
+            gzip.write32u(  # pytype: disable=module-attr
+                self.fileobj, int(self.timestamp)
+            )
             self.fileobj.write(b'\002')
             self.fileobj.write(b'\377')
             if fname:
@@ -179,7 +181,7 @@
                     timestamp=mtime,
                 )
                 self.fileobj = gzfileobj
-                return tarfile.TarFile.taropen(
+                return tarfile.TarFile.taropen(  # pytype: disable=attribute-error
                     name, pycompat.sysstr(mode), gzfileobj
                 )
             else:
@@ -220,7 +222,7 @@
         if isinstance(dest, bytes):
             dest = pycompat.fsdecode(dest)
         self.z = zipfile.ZipFile(
-            dest, r'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED
+            dest, 'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED
         )
 
         # Python's zipfile module emits deprecation warnings if we try
@@ -234,7 +236,7 @@
 
     def addfile(self, name, mode, islink, data):
         i = zipfile.ZipInfo(pycompat.fsdecode(name), self.date_time)
-        i.compress_type = self.z.compression
+        i.compress_type = self.z.compression  # pytype: disable=attribute-error
         # unzip will not honor unix file modes unless file creator is
         # set to unix (id 3).
         i.create_system = 3
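
GzipFileWithTime, further up in this file, shows the pattern behind the 'timestamp' handling: pop a private keyword before delegating, since the base class rejects unknown arguments. A self-contained version of that idea::

    import gzip
    import io
    import time

    class GzipFileWithTime(gzip.GzipFile):
        def __init__(self, *args, **kw):
            # consume our private keyword before gzip.GzipFile sees it
            timestamp = kw.pop('timestamp', None)
            self.timestamp = time.time() if timestamp is None else timestamp
            gzip.GzipFile.__init__(self, *args, **kw)

    buf = io.BytesIO()
    f = GzipFileWithTime(fileobj=buf, mode='wb', timestamp=0)
    f.write(b'data')
    f.close()
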
--- a/mercurial/bookmarks.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/bookmarks.py	Tue Jan 21 13:14:51 2020 -0500
@@ -78,7 +78,7 @@
         self._nodemap = nodemap = {}  # node: sorted([refspec, ...])
         self._clean = True
         self._aclean = True
-        nm = repo.changelog.nodemap
+        has_node = repo.changelog.index.has_node
         tonode = bin  # force local lookup
         try:
             with _getbkfile(repo) as bkfile:
@@ -89,7 +89,7 @@
                     try:
                         sha, refspec = line.split(b' ', 1)
                         node = tonode(sha)
-                        if node in nm:
+                        if has_node(node):
                             refspec = encoding.tolocal(refspec)
                             refmap[refspec] = node
                             nrefs = nodemap.get(node)
@@ -953,38 +953,43 @@
     cur = repo[b'.'].node()
     newact = None
     changes = []
-    hiddenrev = None
 
     # unhide revs if any
     if rev:
         repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
 
+    ctx = scmutil.revsingle(repo, rev, None)
+    # bookmarking wdir means creating a bookmark on p1 and activating it
+    activatenew = not inactive and ctx.rev() is None
+    if ctx.node() is None:
+        ctx = ctx.p1()
+    tgt = ctx.node()
+    assert tgt
+
     for mark in names:
         mark = checkformat(repo, mark)
         if newact is None:
             newact = mark
         if inactive and mark == repo._activebookmark:
             deactivate(repo)
-            return
-        tgt = cur
-        if rev:
-            ctx = scmutil.revsingle(repo, rev)
-            if ctx.hidden():
-                hiddenrev = ctx.hex()[:12]
-            tgt = ctx.node()
+            continue
         for bm in marks.checkconflict(mark, force, tgt):
             changes.append((bm, None))
         changes.append((mark, tgt))
 
-    if hiddenrev:
-        repo.ui.warn(_(b"bookmarking hidden changeset %s\n") % hiddenrev)
+    # nothing changed but for the one deactivated above
+    if not changes:
+        return
+
+    if ctx.hidden():
+        repo.ui.warn(_(b"bookmarking hidden changeset %s\n") % ctx.hex()[:12])
 
         if ctx.obsolete():
-            msg = obsutil._getfilteredreason(repo, b"%s" % hiddenrev, ctx)
+            msg = obsutil._getfilteredreason(repo, ctx.hex()[:12], ctx)
             repo.ui.warn(b"(%s)\n" % msg)
 
     marks.applychanges(repo, tr, changes)
-    if not inactive and cur == marks[newact] and not rev:
+    if activatenew and cur == marks[newact]:
         activate(repo, newact)
     elif cur != tgt and newact == repo._activebookmark:
         deactivate(repo)
--- a/mercurial/branchmap.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/branchmap.py	Tue Jan 21 13:14:51 2020 -0500
@@ -27,6 +27,23 @@
     stringutil,
 )
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Any,
+        Callable,
+        Dict,
+        Iterable,
+        List,
+        Optional,
+        Set,
+        Tuple,
+        Union,
+    )
+
+    assert any(
+        (Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union,)
+    )
+
 subsettable = repoviewutil.subsettable
 
 calcsize = struct.calcsize
@@ -90,14 +107,14 @@
         clrev = cl.rev
         clbranchinfo = cl.branchinfo
         rbheads = []
-        closed = []
+        closed = set()
         for bheads in pycompat.itervalues(remotebranchmap):
             rbheads += bheads
             for h in bheads:
                 r = clrev(h)
                 b, c = clbranchinfo(r)
                 if c:
-                    closed.append(h)
+                    closed.add(h)
 
         if rbheads:
             rtiprev = max((int(clrev(node)) for node in rbheads))
@@ -124,7 +141,7 @@
 def _unknownnode(node):
     """ raises ValueError when branchcache found a node which does not exists
     """
-    raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node)))
+    raise ValueError('node %s does not exist' % pycompat.sysstr(hex(node)))
 
 
 def _branchcachedesc(repo):
@@ -165,6 +182,7 @@
         closednodes=None,
         hasnode=None,
     ):
+        # type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes,  int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
         """ hasnode is a function which can be used to verify whether changelog
         has a given node or not. If it's not provided, we assume that every node
         we have exists in changelog """
@@ -260,7 +278,7 @@
             )
             if not bcache.validfor(repo):
                 # invalidate the cache
-                raise ValueError(r'tip differs')
+                raise ValueError('tip differs')
             bcache.load(repo, lineiter)
         except (IOError, OSError):
             return None
@@ -269,7 +287,13 @@
             if repo.ui.debugflag:
                 msg = b'invalid %s: %s\n'
                 repo.ui.debug(
-                    msg % (_branchcachedesc(repo), pycompat.bytestr(inst))
+                    msg
+                    % (
+                        _branchcachedesc(repo),
+                        pycompat.bytestr(
+                            inst  # pytype: disable=wrong-arg-types
+                        ),
+                    )
                 )
             bcache = None
 
@@ -288,7 +312,7 @@
                 continue
             node, state, label = line.split(b" ", 2)
             if state not in b'oc':
-                raise ValueError(r'invalid branch state')
+                raise ValueError('invalid branch state')
             label = encoding.tolocal(label.strip())
             node = bin(node)
             self._entries.setdefault(label, []).append(node)
@@ -640,7 +664,7 @@
         #   self.branchinfo = self._branchinfo
         #
         # Since we now have data in the cache, we need to drop this bypassing.
-        if r'branchinfo' in vars(self):
+        if 'branchinfo' in vars(self):
             del self.branchinfo
 
     def _setcachedata(self, rev, node, branchidx):
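
The new pycompat.TYPE_CHECKING block imports typing names for the checker only, and the long '# type:' comment on __init__ keeps the annotation out of Python 2 syntax (the 'assert any((...))' line merely marks the imported names as used at runtime). The shape of that idiom, using the stdlib flag directly::

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # imported only while type checking; no runtime import cost
        from typing import Dict, List, Optional


    def branchheads(entries):
        # type: (Dict[bytes, List[bytes]]) -> Optional[List[bytes]]
        # comment annotations stay valid under Python 2
        return entries.get(b'default')


    print(branchheads({b'default': [b'\x00' * 20]}))
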
--- a/mercurial/bundle2.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/bundle2.py	Tue Jan 21 13:14:51 2020 -0500
@@ -653,7 +653,9 @@
         """add a stream level parameter"""
         if not name:
             raise error.ProgrammingError(b'empty parameter name')
-        if name[0:1] not in pycompat.bytestr(string.ascii_letters):
+        if name[0:1] not in pycompat.bytestr(
+            string.ascii_letters  # pytype: disable=wrong-arg-types
+        ):
             raise error.ProgrammingError(
                 b'non letter first character: %s' % name
             )
@@ -835,9 +837,11 @@
               ignored or failing.
         """
         if not name:
-            raise ValueError(r'empty parameter name')
-        if name[0:1] not in pycompat.bytestr(string.ascii_letters):
-            raise ValueError(r'non letter first character: %s' % name)
+            raise ValueError('empty parameter name')
+        if name[0:1] not in pycompat.bytestr(
+            string.ascii_letters  # pytype: disable=wrong-arg-types
+        ):
+            raise ValueError('non letter first character: %s' % name)
         try:
             handler = b2streamparamsmap[name.lower()]
         except KeyError:
@@ -1141,8 +1145,8 @@
             headerchunk = b''.join(header)
         except TypeError:
             raise TypeError(
-                r'Found a non-bytes trying to '
-                r'build bundle part header: %r' % header
+                'Found a non-bytes trying to '
+                'build bundle part header: %r' % header
             )
         outdebug(ui, b'header chunk size: %i' % len(headerchunk))
         yield _pack(_fpartheadersize, len(headerchunk))
@@ -1793,7 +1797,7 @@
 
 
 def addpartbundlestream2(bundler, repo, **kwargs):
-    if not kwargs.get(r'stream', False):
+    if not kwargs.get('stream', False):
         return
 
     if not streamclone.allowservergeneration(repo):
@@ -1815,8 +1819,8 @@
     bundler.prefercompressed = False
 
     # get the includes and excludes
-    includepats = kwargs.get(r'includepats')
-    excludepats = kwargs.get(r'excludepats')
+    includepats = kwargs.get('includepats')
+    excludepats = kwargs.get('excludepats')
 
     narrowstream = repo.ui.configbool(
         b'experimental', b'server.stream-narrow-clones'
@@ -1985,7 +1989,7 @@
     extrakwargs = {}
     targetphase = inpart.params.get(b'targetphase')
     if targetphase is not None:
-        extrakwargs[r'targetphase'] = int(targetphase)
+        extrakwargs['targetphase'] = int(targetphase)
     ret = _processchangegroup(
         op,
         cg,
@@ -2368,7 +2372,7 @@
 
         if pushkeycompat:
 
-            def runhook():
+            def runhook(unused_success):
                 for hookargs in allhooks:
                     op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))
 
--- a/mercurial/bundlerepo.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/bundlerepo.py	Tue Jan 21 13:14:51 2020 -0500
@@ -64,18 +64,18 @@
             start = cgunpacker.tell() - size
 
             link = linkmapper(cs)
-            if node in self.nodemap:
+            if self.index.has_node(node):
                 # this can happen if two branches make the same change
-                self.bundlerevs.add(self.nodemap[node])
+                self.bundlerevs.add(self.index.rev(node))
                 continue
 
             for p in (p1, p2):
-                if p not in self.nodemap:
+                if not self.index.has_node(p):
                     raise error.LookupError(
                         p, self.indexfile, _(b"unknown parent")
                     )
 
-            if deltabase not in self.nodemap:
+            if not self.index.has_node(deltabase):
                 raise LookupError(
                     deltabase, self.indexfile, _(b'unknown delta base')
                 )
@@ -93,7 +93,6 @@
                 node,
             )
             self.index.append(e)
-            self.nodemap[node] = n
             self.bundlerevs.add(n)
             n += 1
 
@@ -331,7 +330,7 @@
         fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
         self.tempfile = temp
 
-        with os.fdopen(fdtemp, r'wb') as fptemp:
+        with os.fdopen(fdtemp, 'wb') as fptemp:
             fptemp.write(header)
             while True:
                 chunk = readfn(2 ** 18)
@@ -393,7 +392,7 @@
         # manifestlog implementation did not consume the manifests from the
         # changegroup (ex: it might be consuming trees from a separate bundle2
         # part instead). So we need to manually consume it.
-        if r'filestart' not in self.__dict__:
+        if 'filestart' not in self.__dict__:
             self._consumemanifest()
 
         return self.filestart
--- a/mercurial/cext/dirs.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/cext/dirs.c	Tue Jan 21 13:14:51 2020 -0500
@@ -9,11 +9,12 @@
 
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
+#include <string.h>
 
 #include "util.h"
 
 #ifdef IS_PY3K
-#define PYLONG_VALUE(o) ((PyLongObject *)o)->ob_digit[1]
+#define PYLONG_VALUE(o) ((PyLongObject *)o)->ob_digit[0]
 #else
 #define PYLONG_VALUE(o) PyInt_AS_LONG(o)
 #endif
@@ -48,12 +49,19 @@
 	return pos;
 }
 
+/* Mercurial will fail to run on directory hierarchies deeper than
+ * this constant, so we should try and keep this constant as big as
+ * possible.
+ */
+#define MAX_DIRS_DEPTH 2048
+
 static int _addpath(PyObject *dirs, PyObject *path)
 {
 	const char *cpath = PyBytes_AS_STRING(path);
 	Py_ssize_t pos = PyBytes_GET_SIZE(path);
 	PyObject *key = NULL;
 	int ret = -1;
+	size_t num_slashes = 0;
 
 	/* This loop is super critical for performance. That's why we inline
 	 * access to Python structs instead of going through a supported API.
@@ -65,6 +73,20 @@
 	 * unnoticed. */
 	while ((pos = _finddir(cpath, pos - 1)) != -1) {
 		PyObject *val;
+		++num_slashes;
+		if (num_slashes > MAX_DIRS_DEPTH) {
+			PyErr_SetString(PyExc_ValueError,
+			                "Directory hierarchy too deep.");
+			goto bail;
+		}
+
+		/* Sniff for trailing slashes, a marker of an invalid input. */
+		if (pos > 0 && cpath[pos - 1] == '/') {
+			PyErr_SetString(
+			    PyExc_ValueError,
+			    "found invalid consecutive slashes in path");
+			goto bail;
+		}
 
 		key = PyBytes_FromStringAndSize(cpath, pos);
 		if (key == NULL)
--- a/mercurial/cext/manifest.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/cext/manifest.c	Tue Jan 21 13:14:51 2020 -0500
@@ -42,17 +42,17 @@
 #define MANIFEST_TOO_SHORT_LINE -5
 
 /* get the length of the path for a line */
-static size_t pathlen(line *l)
+static Py_ssize_t pathlen(line *l)
 {
 	const char *end = memchr(l->start, '\0', l->len);
-	return (end) ? (size_t)(end - l->start) : l->len;
+	return (end) ? (Py_ssize_t)(end - l->start) : l->len;
 }
 
 /* get the node value of a single line */
 static PyObject *nodeof(line *l)
 {
 	char *s = l->start;
-	ssize_t llen = pathlen(l);
+	Py_ssize_t llen = pathlen(l);
 	PyObject *hash;
 	if (llen + 1 + 40 + 1 > l->len) { /* path '\0' hash '\n' */
 		PyErr_SetString(PyExc_ValueError, "manifest line too short");
@@ -76,7 +76,7 @@
 static PyObject *hashflags(line *l)
 {
 	char *s = l->start;
-	size_t plen = pathlen(l);
+	Py_ssize_t plen = pathlen(l);
 	PyObject *hash = nodeof(l);
 
 	/* 40 for hash, 1 for null byte, 1 for newline */
@@ -270,7 +270,7 @@
 
 static PyObject *lmiter_iterentriesnext(PyObject *o)
 {
-	size_t pl;
+	Py_ssize_t pl;
 	line *l;
 	Py_ssize_t consumed;
 	PyObject *ret = NULL, *path = NULL, *hash = NULL, *flags = NULL;
@@ -337,7 +337,7 @@
 
 static PyObject *lmiter_iterkeysnext(PyObject *o)
 {
-	size_t pl;
+	Py_ssize_t pl;
 	line *l = lmiter_nextline((lmIter *)o);
 	if (!l) {
 		return NULL;
--- a/mercurial/cext/parsers.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/cext/parsers.c	Tue Jan 21 13:14:51 2020 -0500
@@ -667,7 +667,7 @@
 void manifest_module_init(PyObject *mod);
 void revlog_module_init(PyObject *mod);
 
-static const int version = 13;
+static const int version = 16;
 
 static void module_init(PyObject *mod)
 {
--- a/mercurial/cext/revlog.c	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/cext/revlog.c	Tue Jan 21 13:14:51 2020 -0500
@@ -37,6 +37,11 @@
 	int children[16];
 } nodetreenode;
 
+typedef struct {
+	int abi_version;
+	int (*index_parents)(PyObject *, int, int *);
+} Revlog_CAPI;
+
 /*
  * A base-16 trie for fast node->rev mapping.
  *
@@ -62,10 +67,9 @@
  * This class has two behaviors.
  *
  * When used in a list-like way (with integer keys), we decode an
- * entry in a RevlogNG index file on demand. Our last entry is a
- * sentinel, always a nullid.  We have limited support for
+ * entry in a RevlogNG index file on demand. We have limited support for
  * integer-keyed insert and delete, only at elements right before the
- * sentinel.
+ * end.
  *
  * With string keys, we lazily perform a reverse mapping from node to
  * rev, using a base-16 trie.
@@ -2065,6 +2069,29 @@
 	}
 }
 
+static PyObject *index_m_has_node(indexObject *self, PyObject *args)
+{
+	int ret = index_contains(self, args);
+	if (ret < 0)
+		return NULL;
+	return PyBool_FromLong((long)ret);
+}
+
+static PyObject *index_m_rev(indexObject *self, PyObject *val)
+{
+	char *node;
+	int rev;
+
+	if (node_check(val, &node) == -1)
+		return NULL;
+	rev = index_find_node(self, node, 20);
+	if (rev >= -1)
+		return PyInt_FromLong(rev);
+	if (rev == -2)
+		raise_revlog_error();
+	return NULL;
+}
+
 typedef uint64_t bitmask;
 
 /*
@@ -2443,7 +2470,7 @@
 
 /*
  * Delete a numeric range of revs, which must be at the end of the
- * range, but exclude the sentinel nullid entry.
+ * range.
  */
 static int index_slice_del(indexObject *self, PyObject *item)
 {
@@ -2489,7 +2516,7 @@
 		if (self->ntinitialized) {
 			Py_ssize_t i;
 
-			for (i = start + 1; i < self->length; i++) {
+			for (i = start; i < self->length; i++) {
 				const char *node = index_node_existing(self, i);
 				if (node == NULL)
 					return -1;
@@ -2500,7 +2527,10 @@
 				index_invalidate_added(self, 0);
 			if (self->ntrev > start)
 				self->ntrev = (int)start;
+		} else if (self->added) {
+			Py_CLEAR(self->added);
 		}
+
 		self->length = start;
 		if (start < self->raw_length) {
 			if (self->cache) {
@@ -2723,6 +2753,12 @@
     {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
      "clear the index caches"},
     {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
+    {"get_rev", (PyCFunction)index_m_get, METH_VARARGS,
+     "return `rev` associated with a node or None"},
+    {"has_node", (PyCFunction)index_m_has_node, METH_O,
+     "return True if the node exist in the index"},
+    {"rev", (PyCFunction)index_m_rev, METH_O,
+     "return `rev` associated with a node or raise RevlogError"},
     {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
      "compute phases"},
     {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
@@ -3001,6 +3037,13 @@
 };
 #endif /* WITH_RUST */
 
+static Revlog_CAPI CAPI = {
+    /* increment the abi_version field upon each change in the Revlog_CAPI
+       struct or in the ABI of the listed functions */
+    1,
+    HgRevlogIndex_GetParents,
+};
+
 void revlog_module_init(PyObject *mod)
 {
 	PyObject *caps = NULL;
@@ -3024,11 +3067,9 @@
 	if (nullentry)
 		PyObject_GC_UnTrack(nullentry);
 
-	caps = PyCapsule_New(HgRevlogIndex_GetParents,
-	                     "mercurial.cext.parsers.index_get_parents_CAPI",
-	                     NULL);
+	caps = PyCapsule_New(&CAPI, "mercurial.cext.parsers.revlog_CAPI", NULL);
 	if (caps != NULL)
-		PyModule_AddObject(mod, "index_get_parents_CAPI", caps);
+		PyModule_AddObject(mod, "revlog_CAPI", caps);
 
 #ifdef WITH_RUST
 	rustlazyancestorsType.tp_new = PyType_GenericNew;
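
index_m_has_node and index_m_rev give the C index the node-lookup surface that the pure-Python callers above (bookmarks.py, bundlerepo.py) now rely on. A dict-backed stand-in showing the intended semantics::

    class FakeIndex(object):
        """minimal stand-in for the revlog index node API"""

        def __init__(self):
            self._nodemap = {}

        def has_node(self, node):  # replaces `node in nodemap`
            return node in self._nodemap

        def get_rev(self, node):  # None for unknown nodes
            return self._nodemap.get(node)

        def rev(self, node):  # raises for unknown nodes
            try:
                return self._nodemap[node]
            except KeyError:
                raise LookupError('node not found')  # RevlogError in Mercurial

    idx = FakeIndex()
    idx._nodemap[b'\x01' * 20] = 0
    assert idx.has_node(b'\x01' * 20) and idx.get_rev(b'\x02' * 20) is None
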
--- a/mercurial/changegroup.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/changegroup.py	Tue Jan 21 13:14:51 2020 -0500
@@ -85,7 +85,7 @@
                 fh = open(filename, b"wb", 131072)
         else:
             fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
-            fh = os.fdopen(fd, r"wb")
+            fh = os.fdopen(fd, "wb")
         cleanup = filename
         for c in chunks:
             fh.write(c)
@@ -287,8 +287,6 @@
         def revmap(x):
             return cl.rev(x)
 
-        changesets = 0
-
         try:
             # The transaction may already carry source information. In this
             # case we use the top level data. We overwrite the argument
@@ -315,15 +313,15 @@
             )
             self.callback = progress.increment
 
-            efiles = set()
+            efilesset = set()
 
             def onchangelog(cl, node):
-                efiles.update(cl.readfiles(node))
+                efilesset.update(cl.readfiles(node))
 
             self.changelogheader()
             deltas = self.deltaiter()
             cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
-            efiles = len(efiles)
+            efiles = len(efilesset)
 
             if not cgnodes:
                 repo.ui.develwarn(
@@ -436,7 +434,7 @@
 
             if changesets > 0:
 
-                def runhooks():
+                def runhooks(unused_success):
                     # These hooks run when the lock releases, not when the
                     # transaction closes. So it's possible for the changelog
                     # to have changed since we last saw it.
@@ -1150,7 +1148,9 @@
         def makelookupmflinknode(tree, nodes):
             if fastpathlinkrev:
                 assert not tree
-                return manifests.__getitem__
+                return (
+                    manifests.__getitem__  # pytype: disable=unsupported-operands
+                )
 
             def lookupmflinknode(x):
                 """Callback for looking up the linknode for manifests.
--- a/mercurial/changelog.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/changelog.py	Tue Jan 21 13:14:51 2020 -0500
@@ -21,7 +21,6 @@
     error,
     pycompat,
     revlog,
-    util,
 )
 from .utils import (
     dateutil,
@@ -168,10 +167,10 @@
 def _divertopener(opener, target):
     """build an opener that writes in 'target.a' instead of 'target'"""
 
-    def _divert(name, mode=b'r', checkambig=False):
+    def _divert(name, mode=b'r', checkambig=False, **kwargs):
         if name != target:
-            return opener(name, mode)
-        return opener(name + b".a", mode)
+            return opener(name, mode, **kwargs)
+        return opener(name + b".a", mode, **kwargs)
 
     return _divert
 
@@ -179,9 +178,10 @@
 def _delayopener(opener, target, buf):
     """build an opener that stores chunks in 'buf' instead of 'target'"""
 
-    def _delay(name, mode=b'r', checkambig=False):
+    def _delay(name, mode=b'r', checkambig=False, **kwargs):
         if name != target:
-            return opener(name, mode)
+            return opener(name, mode, **kwargs)
+        assert not kwargs
         return appender(opener, name, mode, buf)
 
     return _delay
@@ -212,10 +212,10 @@
     """
 
     __slots__ = (
-        r'_offsets',
-        r'_text',
-        r'_sidedata',
-        r'_cpsd',
+        '_offsets',
+        '_text',
+        '_sidedata',
+        '_cpsd',
     )
 
     def __new__(cls, text, sidedata, cpsd):
@@ -405,112 +405,8 @@
         self.filteredrevs = frozenset()
         self._copiesstorage = opener.options.get(b'copies-storage')
 
-    def tiprev(self):
-        for i in pycompat.xrange(len(self) - 1, -2, -1):
-            if i not in self.filteredrevs:
-                return i
-
-    def tip(self):
-        """filtered version of revlog.tip"""
-        return self.node(self.tiprev())
-
-    def __contains__(self, rev):
-        """filtered version of revlog.__contains__"""
-        return 0 <= rev < len(self) and rev not in self.filteredrevs
-
-    def __iter__(self):
-        """filtered version of revlog.__iter__"""
-        if len(self.filteredrevs) == 0:
-            return revlog.revlog.__iter__(self)
-
-        def filterediter():
-            for i in pycompat.xrange(len(self)):
-                if i not in self.filteredrevs:
-                    yield i
-
-        return filterediter()
-
-    def revs(self, start=0, stop=None):
-        """filtered version of revlog.revs"""
-        for i in super(changelog, self).revs(start, stop):
-            if i not in self.filteredrevs:
-                yield i
-
-    def _checknofilteredinrevs(self, revs):
-        """raise the appropriate error if 'revs' contains a filtered revision
-
-        This returns a version of 'revs' to be used thereafter by the caller.
-        In particular, if revs is an iterator, it is converted into a set.
-        """
-        safehasattr = util.safehasattr
-        if safehasattr(revs, '__next__'):
-            # Note that inspect.isgenerator() is not true for iterators,
-            revs = set(revs)
-
-        filteredrevs = self.filteredrevs
-        if safehasattr(revs, 'first'):  # smartset
-            offenders = revs & filteredrevs
-        else:
-            offenders = filteredrevs.intersection(revs)
-
-        for rev in offenders:
-            raise error.FilteredIndexError(rev)
-        return revs
-
-    def headrevs(self, revs=None):
-        if revs is None and self.filteredrevs:
-            try:
-                return self.index.headrevsfiltered(self.filteredrevs)
-            # AttributeError covers non-c-extension environments and
-            # old c extensions without filter handling.
-            except AttributeError:
-                return self._headrevs()
-
-        if self.filteredrevs:
-            revs = self._checknofilteredinrevs(revs)
-        return super(changelog, self).headrevs(revs)
-
-    def strip(self, *args, **kwargs):
-        # XXX make something better than assert
-        # We can't expect proper strip behavior if we are filtered.
-        assert not self.filteredrevs
-        super(changelog, self).strip(*args, **kwargs)
-
-    def rev(self, node):
-        """filtered version of revlog.rev"""
-        r = super(changelog, self).rev(node)
-        if r in self.filteredrevs:
-            raise error.FilteredLookupError(
-                hex(node), self.indexfile, _(b'filtered node')
-            )
-        return r
-
-    def node(self, rev):
-        """filtered version of revlog.node"""
-        if rev in self.filteredrevs:
-            raise error.FilteredIndexError(rev)
-        return super(changelog, self).node(rev)
-
-    def linkrev(self, rev):
-        """filtered version of revlog.linkrev"""
-        if rev in self.filteredrevs:
-            raise error.FilteredIndexError(rev)
-        return super(changelog, self).linkrev(rev)
-
-    def parentrevs(self, rev):
-        """filtered version of revlog.parentrevs"""
-        if rev in self.filteredrevs:
-            raise error.FilteredIndexError(rev)
-        return super(changelog, self).parentrevs(rev)
-
-    def flags(self, rev):
-        """filtered version of revlog.flags"""
-        if rev in self.filteredrevs:
-            raise error.FilteredIndexError(rev)
-        return super(changelog, self).flags(rev)
-
     def delayupdate(self, tr):
-        b"delay visibility of index updates to other readers"
+        """delay visibility of index updates to other readers"""
 
         if not self._delayed:
             if len(self) == 0:
@@ -528,7 +424,7 @@
         tr.addfinalize(b'cl-%i' % id(self), self._finalize)
 
     def _finalize(self, tr):
-        b"finalize index updates"
+        """finalize index updates"""
         self._delayed = False
         self.opener = self._realopener
         # move redirected index data back into place
@@ -548,7 +444,8 @@
         self._enforceinlinesize(tr)
 
     def _writepending(self, tr):
-        b"create a file containing the unfinalized state for pretxnchangegroup"
+        """create a file containing the unfinalized state for
+        pretxnchangegroup"""
         if self._delaybuf:
             # make a temporary copy of the index
             fp1 = self._realopener(self.indexfile)
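Taken together, the three methods above implement delayed index writes:
updates are buffered during a transaction, optionally exposed to
pretxnchangegroup hooks as a pending file, and moved into place at
finalization. A minimal sketch of the lifecycle, with the transaction
wiring assumed:

# Sketch only; tr is an open transaction, cl the changelog.
cl = repo.changelog
cl.delayupdate(tr)       # buffer index updates away from other readers
# ... changesets are appended to the buffered index ...
cl._writepending(tr)     # expose unfinalized state for pretxnchangegroup
# tr.addfinalize() arranged for cl._finalize(tr) to move data into place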
--- a/mercurial/chgserver.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/chgserver.py	Tue Jan 21 13:14:51 2020 -0500
@@ -41,7 +41,6 @@
 
 from __future__ import absolute_import
 
-import hashlib
 import inspect
 import os
 import re
@@ -67,6 +66,7 @@
 )
 
 from .utils import (
+    hashutil,
     procutil,
     stringutil,
 )
@@ -74,7 +74,7 @@
 
 def _hashlist(items):
     """return sha1 hexdigest for a list"""
-    return node.hex(hashlib.sha1(stringutil.pprint(items)).digest())
+    return node.hex(hashutil.sha1(stringutil.pprint(items)).digest())
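For illustration, hashutil.sha1 is used as a drop-in for hashlib.sha1 here;
a hypothetical call (input values made up):

# _hashlist pprints the list, then sha1-hashes the bytes, yielding a
# 40-character hex digest.
confighash = _hashlist([b'ui.editor=vi', b'ui.username=alice'])
assert len(confighash) == 40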
 
 
 # sensitive config sections affecting confighash
@@ -345,9 +345,9 @@
 
 _iochannels = [
     # server.ch, ui.fp, mode
-    (b'cin', b'fin', r'rb'),
-    (b'cout', b'fout', r'wb'),
-    (b'cerr', b'ferr', r'wb'),
+    (b'cin', b'fin', 'rb'),
+    (b'cout', b'fout', 'wb'),
+    (b'cerr', b'ferr', 'wb'),
 ]
 
 
@@ -505,7 +505,7 @@
         path = self._readstr()
         if not path:
             return
-        self.ui.log(b'chgserver', b'chdir to %r\n', path)
+        self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
         os.chdir(path)
 
     def setumask(self):
@@ -549,6 +549,41 @@
         except ValueError:
             raise ValueError(b'unexpected value in setenv request')
         self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
+
+        # Python 3 has some logic to "coerce" the C locale to a UTF-8-capable
+        # one, and it sets LC_CTYPE in the environment to C.UTF-8 if none of
+        # 'LC_CTYPE', 'LC_ALL' or 'LANG' are set (to any value). This can be
+        # disabled with PYTHONCOERCECLOCALE=0 in the environment.
+        #
+        # When fromui is called via _inithashstate, Python has already set
+        # this, so it is in the environment right when we start up the hg
+        # process. Then chg will call us and tell us to set the environment to
+        # the one it has; this might NOT have LC_CTYPE, so we'll need to
+        # carry forward the LC_CTYPE that was coerced in these situations.
+        #
+        # If this is not handled, we will fail config+env validation and fail
+        # to start chg. If this is just ignored instead of carried forward, we
+        # may have different behavior between chg and non-chg.
+        if pycompat.ispy3:
+            # Rename for wordwrapping purposes
+            oldenv = encoding.environ
+            if not any(
+                e.get(b'PYTHONCOERCECLOCALE') == b'0' for e in [oldenv, newenv]
+            ):
+                keys = [b'LC_CTYPE', b'LC_ALL', b'LANG']
+                old_keys = [k for k, v in oldenv.items() if k in keys and v]
+                new_keys = [k for k, v in newenv.items() if k in keys and v]
+                # If the user's environment (from chg) doesn't have ANY of the
+                # keys that python looks for, and the environment (from
+                # initialization) has ONLY LC_CTYPE and it's set to C.UTF-8,
+                # carry it forward.
+                if (
+                    not new_keys
+                    and old_keys == [b'LC_CTYPE']
+                    and oldenv[b'LC_CTYPE'] == b'C.UTF-8'
+                ):
+                    newenv[b'LC_CTYPE'] = oldenv[b'LC_CTYPE']
+
         encoding.environ.clear()
         encoding.environ.update(newenv)
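A standalone restatement of the carry-forward rule above, as a sketch over
plain dicts (not the real function, simplified for illustration):

def _carry_forward_lc_ctype(oldenv, newenv):
    # Keep the coerced LC_CTYPE only when the client sets none of the
    # locale keys Python inspects and coercion produced exactly C.UTF-8.
    if any(e.get(b'PYTHONCOERCECLOCALE') == b'0' for e in (oldenv, newenv)):
        return newenv
    keys = [b'LC_CTYPE', b'LC_ALL', b'LANG']
    old = [k for k in keys if oldenv.get(k)]
    new = [k for k in keys if newenv.get(k)]
    if not new and old == [b'LC_CTYPE'] and oldenv[b'LC_CTYPE'] == b'C.UTF-8':
        newenv[b'LC_CTYPE'] = b'C.UTF-8'
    return newenv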
 
--- a/mercurial/cmdutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/cmdutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -24,6 +24,7 @@
     open,
     setattr,
 )
+from .thirdparty import attr
 
 from . import (
     bookmarks,
@@ -61,6 +62,15 @@
     stringutil,
 )
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Any,
+        Dict,
+    )
+
+    for t in (Any, Dict):
+        assert t
+
 stringio = util.stringio
 
 # templates of common command options
@@ -250,16 +260,45 @@
 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
 
 
+def check_at_most_one_arg(opts, *args):
+    """abort if more than one of the arguments are in opts
+
+    Returns the unique argument or None if none of them were specified.
+    """
+
+    def to_display(name):
+        return pycompat.sysbytes(name).replace(b'_', b'-')
+
+    previous = None
+    for x in args:
+        if opts.get(x):
+            if previous:
+                raise error.Abort(
+                    _(b'cannot specify both --%s and --%s')
+                    % (to_display(previous), to_display(x))
+                )
+            previous = x
+    return previous
+
+
+def check_incompatible_arguments(opts, first, *others):
+    """abort if the first argument is given along with any of the others
+
+    Unlike check_at_most_one_arg(), `others` are not mutually exclusive
+    among themselves.
+    """
+    for other in others:
+        check_at_most_one_arg(opts, first, other)
+
+
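A hypothetical caller, to show the intended shape (opts contents made up):

# Two mutually exclusive flags set at once -> error.Abort is raised with
# "cannot specify both --date and --currentdate".
opts = {b'date': b'today', b'currentdate': True}
check_at_most_one_arg(opts, b'date', b'currentdate')
# With at most one set, the name of the given option (or None) is returned.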
 def resolvecommitoptions(ui, opts):
     """modify commit options dict to handle related options
 
     The return value indicates whether ``rewrite.update-timestamp`` is the
     reason the ``date`` option is set.
     """
-    if opts.get(b'date') and opts.get(b'currentdate'):
-        raise error.Abort(_(b'--date and --currentdate are mutually exclusive'))
-    if opts.get(b'user') and opts.get(b'currentuser'):
-        raise error.Abort(_(b'--user and --currentuser are mutually exclusive'))
+    check_at_most_one_arg(opts, b'date', b'currentdate')
+    check_at_most_one_arg(opts, b'user', b'currentuser')
 
     datemaydiffer = False  # date-only change should be ignored?
 
@@ -320,7 +359,7 @@
 def setupwrapcolorwrite(ui):
     # wrap ui.write so diff output can be labeled/colorized
     def wrapwrite(orig, *args, **kw):
-        label = kw.pop(r'label', b'')
+        label = kw.pop('label', b'')
         for chunk, l in patch.difflabel(lambda: args):
             orig(chunk, label=label + l)
 
@@ -347,7 +386,7 @@
                 ui, originalhunks, recordfn, operation
             )
     except crecordmod.fallbackerror as e:
-        ui.warn(b'%s\n' % e.message)
+        ui.warn(b'%s\n' % e)
         ui.warn(_(b'falling back to text mode\n'))
 
     return patch.filterpatch(ui, originalhunks, match, operation)
@@ -418,9 +457,7 @@
 
         force = opts.get(b'force')
         if not force:
-            vdirs = []
             match = matchmod.badmatch(match, fail)
-            match.explicitdir = vdirs.append
 
         status = repo.status(match=match)
 
@@ -429,13 +466,13 @@
         with repo.ui.configoverride(overrides, b'record'):
             # subrepoutil.precommit() modifies the status
             tmpstatus = scmutil.status(
-                copymod.copy(status[0]),
-                copymod.copy(status[1]),
-                copymod.copy(status[2]),
-                copymod.copy(status[3]),
-                copymod.copy(status[4]),
-                copymod.copy(status[5]),
-                copymod.copy(status[6]),
+                copymod.copy(status.modified),
+                copymod.copy(status.added),
+                copymod.copy(status.removed),
+                copymod.copy(status.deleted),
+                copymod.copy(status.unknown),
+                copymod.copy(status.ignored),
+                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
             )
 
             # Force allows -X subrepo to skip the subrepo.
@@ -448,7 +485,7 @@
                     raise error.Abort(dirtyreason)
 
         if not force:
-            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
+            repo.checkcommitpatterns(wctx, match, status, fail)
         diffopts = patch.difffeatureopts(
             ui,
             opts=opts,
@@ -761,7 +798,7 @@
         tersedict[st].sort()
         tersedlist.append(tersedict[st])
 
-    return tersedlist
+    return scmutil.status(*tersedlist)
 
 
 def _commentlines(raw):
@@ -771,48 +808,101 @@
     return b'\n'.join(commentedlines) + b'\n'
 
 
-def _conflictsmsg(repo):
-    mergestate = mergemod.mergestate.read(repo)
-    if not mergestate.active():
-        return
-
-    m = scmutil.match(repo[None])
-    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
-    if unresolvedlist:
-        mergeliststr = b'\n'.join(
-            [
-                b'    %s' % util.pathto(repo.root, encoding.getcwd(), path)
-                for path in sorted(unresolvedlist)
-            ]
-        )
-        msg = (
-            _(
-                '''Unresolved merge conflicts:
+@attr.s(frozen=True)
+class morestatus(object):
+    reporoot = attr.ib()
+    unfinishedop = attr.ib()
+    unfinishedmsg = attr.ib()
+    activemerge = attr.ib()
+    unresolvedpaths = attr.ib()
+    # use a factory so each instance gets its own mutable set
+    _formattedpaths = attr.ib(init=False, default=attr.Factory(set))
+    _label = b'status.morestatus'
+
+    def formatfile(self, path, fm):
+        self._formattedpaths.add(path)
+        if self.activemerge and path in self.unresolvedpaths:
+            fm.data(unresolved=True)
+
+    def formatfooter(self, fm):
+        if self.unfinishedop or self.unfinishedmsg:
+            fm.startitem()
+            fm.data(itemtype=b'morestatus')
+
+        if self.unfinishedop:
+            fm.data(unfinished=self.unfinishedop)
+            statemsg = (
+                _(b'The repository is in an unfinished *%s* state.')
+                % self.unfinishedop
+            )
+            fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
+        if self.unfinishedmsg:
+            fm.data(unfinishedmsg=self.unfinishedmsg)
+
+        # May also start new data items.
+        self._formatconflicts(fm)
+
+        if self.unfinishedmsg:
+            fm.plain(
+                b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label
+            )
+
+    def _formatconflicts(self, fm):
+        if not self.activemerge:
+            return
+
+        if self.unresolvedpaths:
+            mergeliststr = b'\n'.join(
+                [
+                    b'    %s'
+                    % util.pathto(self.reporoot, encoding.getcwd(), path)
+                    for path in self.unresolvedpaths
+                ]
+            )
+            msg = (
+                _(
+                    '''Unresolved merge conflicts:
 
 %s
 
 To mark files as resolved:  hg resolve --mark FILE'''
+                )
+                % mergeliststr
             )
-            % mergeliststr
-        )
-    else:
-        msg = _(b'No unresolved merge conflicts.')
-
-    return _commentlines(msg)
-
-
-def morestatus(repo, fm):
+
+            # If any paths with unresolved conflicts were not previously
+            # formatted, output them now.
+            for f in self.unresolvedpaths:
+                if f in self._formattedpaths:
+                    # Already output.
+                    continue
+                fm.startitem()
+                # We can't claim to know the status of the file - it may just
+                # have been in one of the states that were not requested for
+                # display, so it could be anything.
+                fm.data(itemtype=b'file', path=f, unresolved=True)
+
+        else:
+            msg = _(b'No unresolved merge conflicts.')
+
+        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
+
+
+def readmorestatus(repo):
+    """Returns a morestatus object if the repo has unfinished state."""
     statetuple = statemod.getrepostate(repo)
-    label = b'status.morestatus'
+    mergestate = mergemod.mergestate.read(repo)
+    activemerge = mergestate.active()
+    if not statetuple and not activemerge:
+        return None
+
+    unfinishedop = unfinishedmsg = unresolved = None
     if statetuple:
-        state, helpfulmsg = statetuple
-        statemsg = _(b'The repository is in an unfinished *%s* state.') % state
-        fm.plain(b'%s\n' % _commentlines(statemsg), label=label)
-        conmsg = _conflictsmsg(repo)
-        if conmsg:
-            fm.plain(b'%s\n' % conmsg, label=label)
-        if helpfulmsg:
-            fm.plain(b'%s\n' % _commentlines(helpfulmsg), label=label)
+        unfinishedop, unfinishedmsg = statetuple
+    if activemerge:
+        unresolved = sorted(mergestate.unresolved())
+    return morestatus(
+        repo.root, unfinishedop, unfinishedmsg, activemerge, unresolved
+    )
 
 
 def findpossible(cmd, table, strict=False):
@@ -991,8 +1081,8 @@
 
     if merge and repo.dirstate.p2() != nullid:
         raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint)
-    modified, added, removed, deleted = repo.status()[:4]
-    if modified or added or removed or deleted:
+    st = repo.status()
+    if st.modified or st.added or st.removed or st.deleted:
         raise error.Abort(_(b'uncommitted changes'), hint=hint)
     ctx = repo[None]
     for s in sorted(ctx.substate):
@@ -1001,13 +1091,12 @@
 
 def logmessage(ui, opts):
     """ get the log message according to -m and -l option """
+
+    check_at_most_one_arg(opts, b'message', b'logfile')
+
     message = opts.get(b'message')
     logfile = opts.get(b'logfile')
 
-    if message and logfile:
-        raise error.Abort(
-            _(b'options --message and --logfile are mutually exclusive')
-        )
     if not message and logfile:
         try:
             if isstdiofilename(logfile):
@@ -1289,7 +1378,7 @@
             if isinstance(r, revlog.revlog):
                 pass
             elif util.safehasattr(r, b'_revlog'):
-                r = r._revlog
+                r = r._revlog  # pytype: disable=attribute-error
             elif r is not None:
                 raise error.Abort(_(b'%r does not appear to be a revlog') % r)
 
@@ -1764,6 +1853,8 @@
             overrides = {}
             if partial:
                 overrides[(b'ui', b'allowemptycommit')] = True
+            if opts.get(b'secret'):
+                overrides[(b'phases', b'new-commit')] = b'secret'
             with repo.ui.configoverride(overrides, b'import'):
                 n = repo.commit(
                     message, user, date, match=m, editor=editor, extra=extra
@@ -2022,7 +2113,7 @@
         rev = ctx.rev()
         if rev in results:
             ui.status(
-                _(b"found revision %s from %s\n")
+                _(b"found revision %d from %s\n")
                 % (rev, dateutil.datestr(results[rev]))
             )
             return b'%d' % rev
@@ -2338,12 +2429,16 @@
 
                     def fns_generator():
                         if allfiles:
-                            fiter = iter(ctx)
+
+                            def bad(f, msg):
+                                pass
+
+                            for f in ctx.matches(matchmod.badmatch(match, bad)):
+                                yield f
                         else:
-                            fiter = ctx.files()
-                        for f in fiter:
-                            if match(f):
-                                yield f
+                            for f in ctx.files():
+                                if match(f):
+                                    yield f
 
                     fns = fns_generator()
                 prepare(ctx, fns)
@@ -2397,7 +2492,7 @@
             submatch = matchmod.subdirmatcher(subpath, match)
             subprefix = repo.wvfs.reljoin(prefix, subpath)
             subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
-            if opts.get(r'subrepos'):
+            if opts.get('subrepos'):
                 bad.extend(
                     sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                 )
@@ -2410,7 +2505,7 @@
                 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
             )
 
-    if not opts.get(r'dry_run'):
+    if not opts.get('dry_run'):
         rejected = wctx.add(names, prefix)
         bad.extend(f for f in rejected if f in match.files())
     return bad
@@ -2565,7 +2660,7 @@
 ):
     ret = 0
     s = repo.status(match=m, clean=True)
-    modified, added, deleted, clean = s[0], s[1], s[3], s[6]
+    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean
 
     wctx = repo[None]
 
@@ -2606,7 +2701,7 @@
     progress.complete()
 
     # warn about failure to delete explicit files/dirs
-    deleteddirs = util.dirs(deleted)
+    deleteddirs = pathutil.dirs(deleted)
     files = m.files()
     progress = ui.makeprogress(
         _(b'deleting'), total=len(files), unit=_(b'files')
@@ -2876,7 +2971,8 @@
         if len(old.parents()) > 1:
             # ctx.files() isn't reliable for merges, so fall back to the
             # slower repo.status() method
-            files = {fn for st in base.status(old)[:3] for fn in st}
+            st = base.status(old)
+            files = set(st.modified) | set(st.added) | set(st.removed)
         else:
             files = set(old.files())
 
@@ -3044,11 +3140,13 @@
         # selectively update the dirstate only for the amended files.
         dirstate = repo.dirstate
 
-        # Update the state of the files which were added and
-        # and modified in the amend to "normal" in the dirstate.
+        # Update the state of the files which were added and modified in the
+        # amend to "normal" in the dirstate. We need to use "normallookup" since
+        # the files may have changed since the command started; using "normal"
+        # would mark them as clean but with uncommitted contents.
         normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
         for f in normalfiles:
-            dirstate.normal(f)
+            dirstate.normallookup(f)
 
         # Update the state of files which were removed in the amend
         # to "removed" in the dirstate.
@@ -3958,6 +4056,7 @@
 
 
 def readgraftstate(repo, graftstate):
+    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
     """read the graft state file and return a dict of the data stored in it"""
     try:
         return graftstate.read()
--- a/mercurial/color.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/color.py	Tue Jan 21 13:14:51 2020 -0500
@@ -145,6 +145,9 @@
     b'status.unknown': b'magenta bold underline',
     b'tags.normal': b'green',
     b'tags.local': b'black bold',
+    b'upgrade-repo.requirement.preserved': b'cyan',
+    b'upgrade-repo.requirement.added': b'green',
+    b'upgrade-repo.requirement.removed': b'red',
 }
 
 
@@ -184,7 +187,7 @@
             # noisy and use ui.debug().
             ui.debug(b"no terminfo entry for %s\n" % e)
             del ui._terminfoparams[key]
-    if not curses.tigetstr(r'setaf') or not curses.tigetstr(r'setab'):
+    if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
         # Only warn about missing terminfo entries if we explicitly asked for
         # terminfo mode and we're in a formatted terminal.
         if mode == b"terminfo" and formatted:
@@ -330,7 +333,7 @@
 
 
 def valideffect(ui, effect):
-    b'Determine if the effect is valid or not.'
+    """Determine if the effect is valid or not."""
     return (not ui._terminfoparams and effect in _activeeffects(ui)) or (
         effect in ui._terminfoparams or effect[:-11] in ui._terminfoparams
     )
@@ -353,9 +356,9 @@
         else:
             return curses.tigetstr(pycompat.sysstr(val))
     elif bg:
-        return curses.tparm(curses.tigetstr(r'setab'), val)
+        return curses.tparm(curses.tigetstr('setab'), val)
     else:
-        return curses.tparm(curses.tigetstr(r'setaf'), val)
+        return curses.tparm(curses.tigetstr('setaf'), val)
 
 
 def _mergeeffects(text, start, stop):
@@ -377,7 +380,7 @@
 
 
 def _render_effects(ui, text, effects):
-    b'Wrap text in commands to turn on each effect.'
+    """Wrap text in commands to turn on each effect."""
     if not text:
         return text
     if ui._terminfoparams:
@@ -435,30 +438,30 @@
 if pycompat.iswindows:
     import ctypes
 
-    _kernel32 = ctypes.windll.kernel32
+    _kernel32 = ctypes.windll.kernel32  # pytype: disable=module-attr
 
     _WORD = ctypes.c_ushort
 
     _INVALID_HANDLE_VALUE = -1
 
     class _COORD(ctypes.Structure):
-        _fields_ = [(r'X', ctypes.c_short), (r'Y', ctypes.c_short)]
+        _fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
 
     class _SMALL_RECT(ctypes.Structure):
         _fields_ = [
-            (r'Left', ctypes.c_short),
-            (r'Top', ctypes.c_short),
-            (r'Right', ctypes.c_short),
-            (r'Bottom', ctypes.c_short),
+            ('Left', ctypes.c_short),
+            ('Top', ctypes.c_short),
+            ('Right', ctypes.c_short),
+            ('Bottom', ctypes.c_short),
         ]
 
     class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
         _fields_ = [
-            (r'dwSize', _COORD),
-            (r'dwCursorPosition', _COORD),
-            (r'wAttributes', _WORD),
-            (r'srWindow', _SMALL_RECT),
-            (r'dwMaximumWindowSize', _COORD),
+            ('dwSize', _COORD),
+            ('dwCursorPosition', _COORD),
+            ('wAttributes', _WORD),
+            ('srWindow', _SMALL_RECT),
+            ('dwMaximumWindowSize', _COORD),
         ]
 
     _STD_OUTPUT_HANDLE = 0xFFFFFFF5  # (DWORD)-11
@@ -529,7 +532,7 @@
             )
 
     def win32print(ui, writefunc, text, **opts):
-        label = opts.get(r'label', b'')
+        label = opts.get('label', b'')
         attr = origattr
 
         def mapcolor(val, attr):
--- a/mercurial/commands.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/commands.py	Tue Jan 21 13:14:51 2020 -0500
@@ -180,7 +180,7 @@
 
     use --dry-run/-n to dry run the command.
     """
-    dryrun = opts.get(r'dry_run')
+    dryrun = opts.get('dry_run')
     abortstate = cmdutil.getunfinishedstate(repo)
     if not abortstate:
         raise error.Abort(_(b'no operation in progress'))
@@ -362,7 +362,7 @@
             b'',
             b'skip',
             [],
-            _(b'revision to not display (EXPERIMENTAL)'),
+            _(b'revset to not display (EXPERIMENTAL)'),
             _(b'REV'),
         ),
     ]
@@ -559,7 +559,7 @@
                 ml = max(sizes)
                 formats.append([sep + b' ' * (ml - w) + b'%s' for w in sizes])
             else:
-                formats.append([b'%s' for x in l])
+                formats.append([b'%s'] * len(l))
             pieces.append(l)
 
         for f, p, n in zip(zip(*formats), zip(*pieces), lines):
@@ -1073,7 +1073,7 @@
                 raise error.Abort(_(b'current bisect revision is a merge'))
         if rev:
             node = repo[scmutil.revsingle(repo, rev, node)].node()
-        try:
+        with hbisect.restore_state(repo, state, node):
             while changesets:
                 # update state
                 state[b'current'] = [node]
@@ -1105,9 +1105,6 @@
                 # update to next check
                 node = nodes[0]
                 mayupdate(repo, node, show_stats=False)
-        finally:
-            state[b'current'] = [node]
-            hbisect.save_state(repo, state)
         hbisect.printresult(ui, repo, state, displayer, nodes, bgood)
         return
 
@@ -1229,13 +1226,9 @@
     rev = opts.get(b'rev')
     inactive = opts.get(b'inactive')  # meaning add/rename to inactive bookmark
 
-    selactions = [k for k in [b'delete', b'rename', b'list'] if opts.get(k)]
-    if len(selactions) > 1:
-        raise error.Abort(
-            _(b'--%s and --%s are incompatible') % tuple(selactions[:2])
-        )
-    if selactions:
-        action = selactions[0]
+    action = cmdutil.check_at_most_one_arg(opts, b'delete', b'rename', b'list')
+    if action:
+        cmdutil.check_incompatible_arguments(opts, action, b'rev')
     elif names or rev:
         action = b'add'
     elif inactive:
@@ -1243,10 +1236,7 @@
     else:
         action = b'list'
 
-    if rev and action in {b'delete', b'rename', b'list'}:
-        raise error.Abort(_(b"--rev is incompatible with --%s") % action)
-    if inactive and action in {b'delete', b'list'}:
-        raise error.Abort(_(b"--inactive is incompatible with --%s") % action)
+    cmdutil.check_incompatible_arguments(opts, b'inactive', b'delete', b'list')
     if not names and action in {b'add', b'delete'}:
         raise error.Abort(_(b"bookmark name required"))
 
@@ -1892,8 +1882,7 @@
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    if opts.get(b'noupdate') and opts.get(b'updaterev'):
-        raise error.Abort(_(b"cannot specify both --noupdate and --updaterev"))
+    cmdutil.check_at_most_one_arg(opts, b'noupdate', b'updaterev')
 
     # --include/--exclude can come from narrow or sparse.
     includepats, excludepats = None, None
@@ -2019,8 +2008,8 @@
 
 
 def _docommit(ui, repo, *pats, **opts):
-    if opts.get(r'interactive'):
-        opts.pop(r'interactive')
+    if opts.get('interactive'):
+        opts.pop('interactive')
         ret = cmdutil.dorecord(
             ui, repo, commit, None, False, cmdutil.recordfilter, *pats, **opts
         )
@@ -2116,13 +2105,13 @@
 
         if not node:
             stat = cmdutil.postcommitstatus(repo, pats, opts)
-            if stat[3]:
+            if stat.deleted:
                 ui.status(
                     _(
                         b"nothing changed (%d missing files, see "
                         b"'hg status')\n"
                     )
-                    % len(stat[3])
+                    % len(stat.deleted)
                 )
             else:
                 ui.status(_(b"nothing changed\n"))
@@ -2234,9 +2223,11 @@
     for t, f in rcutil.rccomponents():
         if t == b'path':
             ui.debug(b'read config from: %s\n' % f)
+        elif t == b'resource':
+            ui.debug(b'read config from: resource:%s.%s\n' % (f[0], f[1]))
         elif t == b'items':
-            for section, name, value, source in f:
-                ui.debug(b'set config by: %s\n' % source)
+            # Don't print anything for 'items'.
+            pass
         else:
             raise error.ProgrammingError(b'unknown rctype: %s' % t)
     untrusted = bool(opts.get(b'untrusted'))
@@ -2295,7 +2286,7 @@
 
     use --dry-run/-n to dry run the command.
     """
-    dryrun = opts.get(r'dry_run')
+    dryrun = opts.get('dry_run')
     contstate = cmdutil.getunfinishedstate(repo)
     if not contstate:
         raise error.Abort(_(b'no operation in progress'))
@@ -2375,7 +2366,7 @@
 def debugcomplete(ui, cmd=b'', **opts):
     """returns the completion list associated with the given command"""
 
-    if opts.get(r'options'):
+    if opts.get('options'):
         options = []
         otables = [globalopts]
         if cmd:
@@ -2614,8 +2605,7 @@
     bookmark = opts.get(b'bookmark')
     changesets += tuple(opts.get(b'rev', []))
 
-    if bookmark and changesets:
-        raise error.Abort(_(b"-r and -B are mutually exclusive"))
+    cmdutil.check_at_most_one_arg(opts, b'rev', b'bookmark')
 
     if bookmark:
         if bookmark not in repo._bookmarks:
@@ -2974,14 +2964,7 @@
     # list of new nodes created by ongoing graft
     statedata[b'newnodes'] = []
 
-    if opts.get(b'user') and opts.get(b'currentuser'):
-        raise error.Abort(_(b'--user and --currentuser are mutually exclusive'))
-    if opts.get(b'date') and opts.get(b'currentdate'):
-        raise error.Abort(_(b'--date and --currentdate are mutually exclusive'))
-    if not opts.get(b'user') and opts.get(b'currentuser'):
-        opts[b'user'] = ui.username()
-    if not opts.get(b'date') and opts.get(b'currentdate'):
-        opts[b'date'] = b"%d %d" % dateutil.makedate()
+    cmdutil.resolvecommitoptions(ui, opts)
 
     editor = cmdutil.getcommiteditor(
         editform=b'graft', **pycompat.strkwargs(opts)
@@ -3096,17 +3079,12 @@
     # already, they'd have been in the graftstate.
     if not (cont or opts.get(b'force')) and basectx is None:
         # check for ancestors of dest branch
-        crev = repo[b'.'].rev()
-        ancestors = repo.changelog.ancestors([crev], inclusive=True)
-        # XXX make this lazy in the future
-        # don't mutate while iterating, create a copy
-        for rev in list(revs):
-            if rev in ancestors:
-                ui.warn(
-                    _(b'skipping ancestor revision %d:%s\n') % (rev, repo[rev])
-                )
-                # XXX remove on list is slow
-                revs.remove(rev)
+        ancestors = repo.revs(b'%ld & (::.)', revs)
+        for rev in ancestors:
+            ui.warn(_(b'skipping ancestor revision %d:%s\n') % (rev, repo[rev]))
+
+        revs = [r for r in revs if r not in ancestors]
+
         if not revs:
             return -1
 
@@ -3123,7 +3101,7 @@
 
         # The only changesets we can be sure don't contain grafts of any
         # revs are the ones that are common ancestors of *all* revs:
-        for rev in repo.revs(b'only(%d,ancestor(%ld))', crev, revs):
+        for rev in repo.revs(b'only(%d,ancestor(%ld))', repo[b'.'].rev(), revs):
             ctx = repo[rev]
             n = ctx.extra().get(b'source')
             if n in ids:
@@ -3446,6 +3424,9 @@
     def grepbody(fn, rev, body):
         matches[rev].setdefault(fn, [])
         m = matches[rev][fn]
+        if body is None:
+            return
+
         for lnum, cstart, cend, line in matchlines(body):
             s = linestate(line, lnum, cstart, cend)
             m.append(s)
@@ -3453,13 +3434,13 @@
     def difflinestates(a, b):
         sm = difflib.SequenceMatcher(None, a, b)
         for tag, alo, ahi, blo, bhi in sm.get_opcodes():
-            if tag == r'insert':
+            if tag == 'insert':
                 for i in pycompat.xrange(blo, bhi):
                     yield (b'+', b[i])
-            elif tag == r'delete':
+            elif tag == 'delete':
                 for i in pycompat.xrange(alo, ahi):
                     yield (b'-', a[i])
-            elif tag == r'replace':
+            elif tag == 'replace':
                 for i in pycompat.xrange(alo, ahi):
                     yield (b'-', a[i])
                 for i in pycompat.xrange(blo, bhi):
@@ -3581,6 +3562,19 @@
 
     getrenamed = scmutil.getrenamedfn(repo)
 
+    def get_file_content(filename, filelog, filenode, context, revision):
+        try:
+            content = filelog.read(filenode)
+        except error.WdirUnsupported:
+            content = context[filename].data()
+        except error.CensoredNodeError:
+            content = None
+            ui.warn(
+                _(b'cannot search in censored file: %(filename)s:%(revnum)s\n')
+                % {b'filename': filename, b'revnum': pycompat.bytestr(revision)}
+            )
+        return content
+
     def prep(ctx, fns):
         rev = ctx.rev()
         pctx = ctx.p1()
@@ -3607,17 +3601,15 @@
             files.append(fn)
 
             if fn not in matches[rev]:
-                try:
-                    content = flog.read(fnode)
-                except error.WdirUnsupported:
-                    content = ctx[fn].data()
+                content = get_file_content(fn, flog, fnode, ctx, rev)
                 grepbody(fn, rev, content)
 
             pfn = copy or fn
             if pfn not in matches[parent]:
                 try:
-                    fnode = pctx.filenode(pfn)
-                    grepbody(pfn, parent, flog.read(fnode))
+                    pfnode = pctx.filenode(pfn)
+                    pcontent = get_file_content(pfn, flog, pfnode, pctx, parent)
+                    grepbody(pfn, parent, pcontent)
                 except error.LookupError:
                     pass
 
@@ -3775,7 +3767,7 @@
     Returns 0 if successful.
     """
 
-    keep = opts.get(r'system') or []
+    keep = opts.get('system') or []
     if len(keep) == 0:
         if pycompat.sysplatform.startswith(b'win'):
             keep.append(b'windows')
@@ -4022,6 +4014,7 @@
             _(b'NUM'),
         ),
         (b'b', b'base', b'', _(b'base path (DEPRECATED)'), _(b'PATH')),
+        (b'', b'secret', None, _(b'use the secret phase for committing')),
         (b'e', b'edit', False, _(b'invoke editor on commit messages')),
         (
             b'f',
@@ -4170,6 +4163,8 @@
     update = not opts.get(b'bypass')
     if not update and opts.get(b'no_commit'):
         raise error.Abort(_(b'cannot use --no-commit with --bypass'))
+    if opts.get(b'secret') and opts.get(b'no_commit'):
+        raise error.Abort(_(b'cannot use --no-commit with --secret'))
     try:
         sim = float(opts.get(b'similarity') or 0)
     except ValueError:
@@ -4874,6 +4869,13 @@
         node = scmutil.revsingle(repo, node).node()
 
     if not node and not abort:
+        if ui.configbool(b'commands', b'merge.require-rev'):
+            raise error.Abort(
+                _(
+                    b'configuration requires specifying revision to merge '
+                    b'with'
+                )
+            )
         node = repo[destutil.destmerge(repo)].node()
 
     if opts.get(b'preview'):
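The guard reads a plain boolean knob, registered in the configitems.py hunk
later in this diff; the equivalent hgrc snippet, shown here as a comment:

# [commands]
# merge.require-rev = True
# With it set, a bare `hg merge` aborts instead of merging with the
# destutil.destmerge()-computed default destination.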
@@ -5685,7 +5687,7 @@
     """
     ret = repo.recover()
     if ret:
-        if opts[r'verify']:
+        if opts['verify']:
             return hg.verify(repo)
         else:
             msg = _(
@@ -6330,7 +6332,7 @@
             _(b'rollback is disabled because it is unsafe'),
             hint=b'see `hg help -v rollback` for information',
         )
-    return repo.rollback(dryrun=opts.get(r'dry_run'), force=opts.get(r'force'))
+    return repo.rollback(dryrun=opts.get('dry_run'), force=opts.get('force'))
 
 
 @command(
@@ -6803,7 +6805,6 @@
         end = b'\0'
     else:
         end = b'\n'
-    copy = {}
     states = b'modified added removed deleted unknown ignored clean'.split()
     show = [k for k in states if opts.get(k)]
     if opts.get(b'all'):
@@ -6840,8 +6841,13 @@
             opts.get(b'subrepos'),
         )
 
-    changestates = zip(states, pycompat.iterbytestr(b'MAR!?IC'), stat)
-
+    changestates = zip(
+        states,
+        pycompat.iterbytestr(b'MAR!?IC'),
+        [getattr(stat, s.decode('utf8')) for s in states],
+    )
+
+    copy = {}
     if (
         opts.get(b'all')
         or opts.get(b'copies')
@@ -6849,6 +6855,12 @@
     ) and not opts.get(b'no_status'):
         copy = copies.pathcopies(ctx1, ctx2, m)
 
+    morestatus = None
+    if (
+        ui.verbose or ui.configbool(b'commands', b'status.verbose')
+    ) and not ui.plain():
+        morestatus = cmdutil.readmorestatus(repo)
+
     ui.pager(b'status')
     fm = ui.formatter(b'status', opts)
     fmt = b'%s' + end
@@ -6860,7 +6872,7 @@
             for f in files:
                 fm.startitem()
                 fm.context(ctx=ctx2)
-                fm.data(path=f)
+                fm.data(itemtype=b'file', path=f)
                 fm.condwrite(showchar, b'status', b'%s ', char, label=label)
                 fm.plain(fmt % uipathfn(f), label=label)
                 if f in copy:
@@ -6869,11 +6881,11 @@
                         (b'  %s' + end) % uipathfn(copy[f]),
                         label=b'status.copied',
                     )
-
-    if (
-        ui.verbose or ui.configbool(b'commands', b'status.verbose')
-    ) and not ui.plain():
-        cmdutil.morestatus(repo, fm)
+                if morestatus:
+                    morestatus.formatfile(f, fm)
+
+    if morestatus:
+        morestatus.formatfooter(fm)
     fm.end()
 
 
@@ -7480,7 +7492,7 @@
                 )
             modheads = bundle2.combinechangegroupresults(op)
 
-    return postincoming(ui, repo, modheads, opts.get(r'update'), None, None)
+    return postincoming(ui, repo, modheads, opts.get('update'), None, None)
 
 
 @command(
@@ -7511,7 +7523,7 @@
             _(b'DATE'),
         ),
     ],
-    _(b'hg unshelve [OPTION]... [FILE]... [-n SHELVED]'),
+    _(b'hg unshelve [OPTION]... [[-n] SHELVED]'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
 )
 def unshelve(ui, repo, *shelved, **opts):
@@ -7535,9 +7547,9 @@
     that causes a conflict. This reverts the unshelved changes, and
     leaves the bundle in place.)
 
-    If bare shelved change (when no files are specified, without interactive,
-    include and exclude option) was done on newly created branch it would
-    restore branch information to the working directory.
+    If a bare shelved change (one made without the interactive, include,
+    or exclude options) was done on a newly created branch, unshelving it
+    restores the branch information to the working directory.
 
     After a successful unshelve, the shelved changes are stored in a
     backup directory. Only the N most recent backups are kept. N
@@ -7641,11 +7653,11 @@
 
     Returns 0 on success, 1 if there are unresolved files.
     """
-    rev = opts.get(r'rev')
-    date = opts.get(r'date')
-    clean = opts.get(r'clean')
-    check = opts.get(r'check')
-    merge = opts.get(r'merge')
+    rev = opts.get('rev')
+    date = opts.get('date')
+    clean = opts.get('clean')
+    check = opts.get('check')
+    merge = opts.get('merge')
     if rev and node:
         raise error.Abort(_(b"please specify just one revision"))
 
@@ -7688,7 +7700,7 @@
         ctx = scmutil.revsingle(repo, rev, default=None)
         rev = ctx.rev()
         hidden = ctx.hidden()
-        overrides = {(b'ui', b'forcemerge'): opts.get(r'tool', b'')}
+        overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
         with ui.configoverride(overrides, b'update'):
             ret = hg.updatetotally(
                 ui, repo, rev, brev, clean=clean, updatecheck=updatecheck
--- a/mercurial/commandserver.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/commandserver.py	Tue Jan 21 13:14:51 2020 -0500
@@ -64,7 +64,7 @@
         self.out.flush()
 
     def __getattr__(self, attr):
-        if attr in (r'isatty', r'fileno', r'tell', r'seek'):
+        if attr in ('isatty', 'fileno', 'tell', 'seek'):
             raise AttributeError(attr)
         return getattr(self.out, attr)
 
@@ -180,7 +180,7 @@
     __next__ = next
 
     def __getattr__(self, attr):
-        if attr in (r'isatty', r'fileno', r'tell', r'seek'):
+        if attr in ('isatty', 'fileno', 'tell', 'seek'):
             raise AttributeError(attr)
         return getattr(self.in_, attr)
 
@@ -450,8 +450,8 @@
 
 
 def _serverequest(ui, repo, conn, createcmdserver, prereposetups):
-    fin = conn.makefile(r'rb')
-    fout = conn.makefile(r'wb')
+    fin = conn.makefile('rb')
+    fout = conn.makefile('wb')
     sv = None
     try:
         sv = createcmdserver(repo, conn, fin, fout, prereposetups)
--- a/mercurial/config.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/config.py	Tue Jan 21 13:14:51 2020 -0500
@@ -212,7 +212,7 @@
     def read(self, path, fp=None, sections=None, remap=None):
         if not fp:
             fp = util.posixfile(path, b'rb')
-        assert getattr(fp, 'mode', r'rb') == r'rb', (
+        assert getattr(fp, 'mode', 'rb') == 'rb', (
             b'config files must be opened in binary mode, got fp=%r mode=%r'
             % (fp, fp.mode,)
         )
--- a/mercurial/configitems.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/configitems.py	Tue Jan 21 13:14:51 2020 -0500
@@ -228,6 +228,9 @@
     b'commands', b'grep.all-files', default=False, experimental=True,
 )
 coreconfigitem(
+    b'commands', b'merge.require-rev', default=False,
+)
+coreconfigitem(
     b'commands', b'push.require-revs', default=False,
 )
 coreconfigitem(
@@ -433,6 +436,9 @@
     b'devel', b'debug.extensions', default=False,
 )
 coreconfigitem(
+    b'devel', b'debug.repo-filters', default=False,
+)
+coreconfigitem(
     b'devel', b'debug.peer-request', default=False,
 )
 coreconfigitem(
@@ -651,6 +657,9 @@
     b'experimental', b'revisions.disambiguatewithin', default=None,
 )
 coreconfigitem(
+    b'experimental', b'rust.index', default=False,
+)
+coreconfigitem(
     b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
 )
 coreconfigitem(
@@ -703,6 +712,9 @@
     b'experimental', b'worker.wdir-get-thread-safe', default=False,
 )
 coreconfigitem(
+    b'experimental', b'worker.repository-upgrade', default=False,
+)
+coreconfigitem(
     b'experimental', b'xdiff', default=False,
 )
 coreconfigitem(
--- a/mercurial/context.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/context.py	Tue Jan 21 13:14:51 2020 -0500
@@ -71,7 +71,7 @@
     __str__ = encoding.strmethod(__bytes__)
 
     def __repr__(self):
-        return r"<%s %s>" % (type(self).__name__, str(self))
+        return "<%s %s>" % (type(self).__name__, str(self))
 
     def __eq__(self, other):
         try:
@@ -200,8 +200,8 @@
     def mutable(self):
         return self.phase() > phases.public
 
-    def matchfileset(self, expr, badfn=None):
-        return fileset.match(self, expr, badfn=badfn)
+    def matchfileset(self, cwd, expr, badfn=None):
+        return fileset.match(self, cwd, expr, badfn=badfn)
 
     def obsolete(self):
         """True if the changeset is obsolete"""
@@ -265,14 +265,14 @@
         return self._repo[nullrev]
 
     def _fileinfo(self, path):
-        if r'_manifest' in self.__dict__:
+        if '_manifest' in self.__dict__:
             try:
                 return self._manifest[path], self._manifest.flags(path)
             except KeyError:
                 raise error.ManifestLookupError(
                     self._node, path, _(b'not found in manifest')
                 )
-        if r'_manifestdelta' in self.__dict__ or path in self.files():
+        if '_manifestdelta' in self.__dict__ or path in self.files():
             if path in self._manifestdelta:
                 return (
                     self._manifestdelta[path],
@@ -328,11 +328,14 @@
         default=b'glob',
         listsubrepos=False,
         badfn=None,
+        cwd=None,
     ):
         r = self._repo
+        if not cwd:
+            cwd = r.getcwd()
         return matchmod.match(
             r.root,
-            r.getcwd(),
+            cwd,
             pats,
             include,
             exclude,
@@ -449,11 +452,25 @@
                     unknown=listunknown,
                     listsubrepos=True,
                 )
-                for rfiles, sfiles in zip(r, s):
+                for k in (
+                    'modified',
+                    'added',
+                    'removed',
+                    'deleted',
+                    'unknown',
+                    'ignored',
+                    'clean',
+                ):
+                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                     rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
 
-        for l in r:
-            l.sort()
+        r.modified.sort()
+        r.added.sort()
+        r.removed.sort()
+        r.deleted.sort()
+        r.unknown.sort()
+        r.ignored.sort()
+        r.clean.sort()
 
         return r
 
@@ -463,10 +480,17 @@
     changeset convenient. It represents a read-only context already present in
     the repo."""
 
-    def __init__(self, repo, rev, node):
+    def __init__(self, repo, rev, node, maybe_filtered=True):
         super(changectx, self).__init__(repo)
         self._rev = rev
         self._node = node
+        # When maybe_filtered is True, the revision might be affected by
+        # changelog filtering, so operations must go through the filtered
+        # changelog.
+        #
+        # When maybe_filtered is False, the revision has already been checked
+        # against filtering and is not filtered. Operations may go through
+        # the unfiltered changelog in some cases.
+        self._maybe_filtered = maybe_filtered
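The flag is purely an optimization hint; a sketch of the intended
construction pattern (mirroring _parents below):

# Once a rev is known to survive filtering (it came from the filtered
# changelog itself), later lookups may skip re-checking.
cl = repo.changelog                  # filtered view
p1, p2 = cl.parentrevs(some_rev)
p1ctx = changectx(repo, p1, cl.node(p1), maybe_filtered=False)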
 
     def __hash__(self):
         try:
@@ -481,7 +505,11 @@
 
     @propertycache
     def _changeset(self):
-        return self._repo.changelog.changelogrevision(self.rev())
+        if self._maybe_filtered:
+            repo = self._repo
+        else:
+            repo = self._repo.unfiltered()
+        return repo.changelog.changelogrevision(self.rev())
 
     @propertycache
     def _manifest(self):
@@ -498,10 +526,18 @@
     @propertycache
     def _parents(self):
         repo = self._repo
-        p1, p2 = repo.changelog.parentrevs(self._rev)
+        if self._maybe_filtered:
+            cl = repo.changelog
+        else:
+            cl = repo.unfiltered().changelog
+
+        p1, p2 = cl.parentrevs(self._rev)
         if p2 == nullrev:
-            return [repo[p1]]
-        return [repo[p1], repo[p2]]
+            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
+        return [
+            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
+            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
+        ]
 
     def changeset(self):
         c = self._changeset
@@ -746,9 +782,9 @@
 
     @propertycache
     def _changeid(self):
-        if r'_changectx' in self.__dict__:
+        if '_changectx' in self.__dict__:
             return self._changectx.rev()
-        elif r'_descendantrev' in self.__dict__:
+        elif '_descendantrev' in self.__dict__:
             # this file context was created from a revision with a known
             # descendant, we can (lazily) correct for linkrev aliases
             return self._adjustlinkrev(self._descendantrev)
@@ -757,7 +793,7 @@
 
     @propertycache
     def _filenode(self):
-        if r'_fileid' in self.__dict__:
+        if '_fileid' in self.__dict__:
             return self._filelog.lookup(self._fileid)
         else:
             return self._changectx.filenode(self._path)
@@ -789,7 +825,7 @@
     __str__ = encoding.strmethod(__bytes__)
 
     def __repr__(self):
-        return r"<%s %s>" % (type(self).__name__, str(self))
+        return "<%s %s>" % (type(self).__name__, str(self))
 
     def __hash__(self):
         try:
@@ -1024,16 +1060,16 @@
         """
         toprev = None
         attrs = vars(self)
-        if r'_changeid' in attrs:
+        if '_changeid' in attrs:
             # We have a cached value already
             toprev = self._changeid
-        elif r'_changectx' in attrs:
+        elif '_changectx' in attrs:
             # We know which changelog entry we are coming from
             toprev = self._changectx.rev()
 
         if toprev is not None:
             return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
-        elif r'_descendantrev' in attrs:
+        elif '_descendantrev' in attrs:
             introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
             # be nice and cache the result of the computation
             if introrev is not None:
@@ -1053,14 +1089,14 @@
     def _parentfilectx(self, path, fileid, filelog):
         """create parent filectx keeping ancestry info for _adjustlinkrev()"""
         fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
-        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
+        if '_changeid' in vars(self) or '_changectx' in vars(self):
             # If self is associated with a changeset (probably explicitly
             # fed), ensure the created filectx is associated with a
             # changeset that is an ancestor of self.changectx.
             # This lets us later use _adjustlinkrev to get a correct link.
             fctx._descendantrev = self.rev()
             fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
-        elif r'_descendantrev' in vars(self):
+        elif '_descendantrev' in vars(self):
             # Otherwise propagate _descendantrev if we have one associated.
             fctx._descendantrev = self._descendantrev
             fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
@@ -1120,7 +1156,7 @@
             # renamed filectx won't have a filelog yet, so set it
             # from the cache to save time
             for p in pl:
-                if not r'_filelog' in p.__dict__:
+                if not '_filelog' in p.__dict__:
                     p._filelog = getlog(p.path())
 
             return pl
@@ -1128,7 +1164,9 @@
         # use linkrev to find the first changeset where self appeared
         base = self.introfilectx()
         if getattr(base, '_ancestrycontext', None) is None:
-            cl = self._repo.changelog
+            # it is safe to use an unfiltered repository here because we are
+            # walking ancestors only.
+            cl = self._repo.unfiltered().changelog
             if base.rev() is None:
                 # wctx is not inclusive, but works because _ancestrycontext
                 # is used to test filelog revisions
@@ -1409,7 +1447,7 @@
         return b
 
     def phase(self):
-        phase = phases.draft  # default phase to draft
+        phase = phases.newcommitphase(self._repo.ui)
         for p in self.parents():
             phase = max(phase, p.phase())
         return phase
@@ -1488,7 +1526,29 @@
             p = p[:-1]
         # use unfiltered repo to delay/avoid loading obsmarkers
         unfi = self._repo.unfiltered()
-        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
+        return [
+            changectx(
+                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
+            )
+            for n in p
+        ]
+
+    def setparents(self, p1node, p2node=nullid):
+        dirstate = self._repo.dirstate
+        with dirstate.parentchange():
+            copies = dirstate.setparents(p1node, p2node)
+            pctx = self._repo[p1node]
+            if copies:
+                # Adjust copy records, the dirstate cannot do it, it
+                # requires access to parents manifests. Preserve them
+                # only for entries added to first parent.
+                for f in copies:
+                    if f not in pctx and copies[f] in pctx:
+                        dirstate.copy(copies[f], f)
+            if p2node == nullid:
+                for f, s in sorted(dirstate.copies().items()):
+                    if f not in pctx and s not in pctx:
+                        dirstate.copy(None, f)
 
     def _fileinfo(self, path):
         # populate __dict__['_manifest'] as workingctx has no _manifestdelta
@@ -1534,7 +1594,7 @@
         return self._repo.dirstate.flagfunc(self._buildflagfunc)
 
     def flags(self, path):
-        if r'_manifest' in self.__dict__:
+        if '_manifest' in self.__dict__:
             try:
                 return self._manifest.flags(path)
             except KeyError:
@@ -1552,7 +1612,7 @@
         )
 
     def dirty(self, missing=False, merge=True, branch=True):
-        b"check whether a working directory is modified"
+        """check whether a working directory is modified"""
         # check subrepos first
         for s in sorted(self.substate):
             if self.sub(s).dirty(missing=missing):
@@ -1659,15 +1719,18 @@
         default=b'glob',
         listsubrepos=False,
         badfn=None,
+        cwd=None,
     ):
         r = self._repo
+        if not cwd:
+            cwd = r.getcwd()
 
         # Only a case insensitive filesystem needs magic to translate user input
         # to actual case in the filesystem.
         icasefs = not util.fscasesensitive(r.root)
         return matchmod.match(
             r.root,
-            r.getcwd(),
+            cwd,
             pats,
             include,
             exclude,
@@ -1931,6 +1994,7 @@
             for f in self.removed():
                 self._repo.dirstate.drop(f)
             self._repo.dirstate.setparents(node)
+            self._repo._quick_access_changeid_invalidate()
 
         # write changes out explicitly, because nesting wlock at
         # runtime may prevent 'wlock.release()' in 'repo.commit()'
@@ -2080,7 +2144,7 @@
             # warned and backed up
             if wvfs.isdir(f) and not wvfs.islink(f):
                 wvfs.rmtree(f, forcibly=True)
-            for p in reversed(list(util.finddirs(f))):
+            for p in reversed(list(pathutil.finddirs(f))):
                 if wvfs.isfileorlink(p):
                     wvfs.unlink(p)
                     break
@@ -2120,6 +2184,10 @@
         # ``overlayworkingctx`` (e.g. with --collapse).
         util.clearcachedproperty(self, b'_manifest')
 
+    def setparents(self, p1node, p2node=nullid):
+        assert p1node == self._wrappedctx.node()
+        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
+
     def data(self, path):
         if self.isdirty(path):
             if self._cache[path][b'exists']:
@@ -2183,7 +2251,7 @@
         ]
 
     def p1copies(self):
-        copies = self._repo._wrappedctx.p1copies().copy()
+        copies = {}
         narrowmatch = self._repo.narrowmatch()
         for f in self._cache.keys():
             if not narrowmatch(f):
@@ -2195,7 +2263,7 @@
         return copies
 
     def p2copies(self):
-        copies = self._repo._wrappedctx.p2copies().copy()
+        copies = {}
         narrowmatch = self._repo.narrowmatch()
         for f in self._cache.keys():
             if not narrowmatch(f):
@@ -2374,9 +2442,9 @@
         ``text`` is the commit message.
         ``parents`` (optional) are rev numbers.
         """
-        # Default parents to the wrapped contexts' if not passed.
+        # Default parents to the wrapped context if not passed.
         if parents is None:
-            parents = self._wrappedctx.parents()
+            parents = self.parents()
             if len(parents) == 1:
                 parents = (parents[0], None)
 
@@ -2404,6 +2472,9 @@
                 # necessary for memctx to register a deletion.
                 return None
 
+        if branch is None:
+            branch = self._wrappedctx.branch()
+
         return memctx(
             self._repo,
             parents,
@@ -2697,7 +2768,7 @@
         date=None,
         extra=None,
         branch=None,
-        editor=False,
+        editor=None,
     ):
         super(memctx, self).__init__(
             repo, text, user, date, extra, branch=branch
@@ -2858,7 +2929,7 @@
         user=None,
         date=None,
         extra=None,
-        editor=False,
+        editor=None,
     ):
         if text is None:
             text = originalctx.description()
--- a/mercurial/copies.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/copies.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,6 +8,7 @@
 from __future__ import absolute_import
 
 import collections
+import multiprocessing
 import os
 
 from .i18n import _
@@ -63,12 +64,12 @@
             del t[k]
 
 
-def _chain(a, b):
-    """chain two sets of copies 'a' and 'b'"""
-    t = a.copy()
-    for k, v in pycompat.iteritems(b):
-        t[k] = t.get(v, v)
-    return t
+def _chain(prefix, suffix):
+    """chain two sets of copies 'prefix' and 'suffix'"""
+    result = prefix.copy()
+    for key, value in pycompat.iteritems(suffix):
+        result[key] = prefix.get(value, value)
+    return result
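A worked example of the chaining rule (file names made up):

# 'b' was copied from 'a' in prefix; 'c' from 'b' in suffix. The chained
# result records both as ultimately coming from 'a'.
prefix = {b'b': b'a'}
suffix = {b'c': b'b'}
assert _chain(prefix, suffix) == {b'b': b'a', b'c': b'a'}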
 
 
 def _tracefile(fctx, am, basemf):
@@ -231,7 +232,7 @@
             else:
                 p1copies = {}
                 p2copies = {}
-                removed = ()
+                removed = []
             return p1, p2, p1copies, p2copies, removed
 
     else:
@@ -281,10 +282,28 @@
     iterrevs &= mrset
     iterrevs.update(roots)
     iterrevs.remove(b.rev())
-    all_copies = {r: {} for r in roots}
+    revs = sorted(iterrevs)
+    return _combinechangesetcopies(revs, children, b.rev(), revinfo, match)
+
+
+def _combinechangesetcopies(revs, children, targetrev, revinfo, match):
+    """combine the copies information for each item of iterrevs
+
+    revs: sorted iterable of revision to visit
+    children: a {parent: [children]} mapping.
+    targetrev: the final copies destination revision (not in iterrevs)
+    revinfo(rev): a function that return (p1, p2, p1copies, p2copies, removed)
+    match: a matcher
+
+    It returns the aggregated copies information for `targetrev`.
+    """
+    all_copies = {}
     alwaysmatch = match.always()
-    for r in sorted(iterrevs):
-        copies = all_copies.pop(r)
+    for r in revs:
+        copies = all_copies.pop(r, None)
+        if copies is None:
+            # this is a root
+            copies = {}
         for i, c in enumerate(children[r]):
             p1, p2, p1copies, p2copies, removed = revinfo(c)
             if r == p1:
@@ -333,7 +352,7 @@
                 else:
                     newcopies.update(othercopies)
                     all_copies[c] = newcopies
-    return all_copies[b.rev()]
+    return all_copies[targetrev]
 
 
 def _forwardcopies(a, b, base=None, match=None):
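A stripped-down, first-parent-only model of the traversal above, under hypothetical data (the real function also handles second parents, matcher filtering, and reconciliation on merges):

def combine_copies(revs, children, targetrev, revinfo):
    all_copies = {}
    for r in revs:
        copies = all_copies.pop(r, None)
        if copies is None:
            copies = {}  # r is a root: start from an empty copy map
        for c in children[r]:
            _p1, p1copies, removed = revinfo(c)
            newcopies = copies
            if p1copies or removed:
                newcopies = copies.copy()
                for dest, source in p1copies.items():
                    # chain through earlier copies, as in _chain()
                    newcopies[dest] = copies.get(source, source)
                for f in removed:
                    newcopies.pop(f, None)
            all_copies[c] = newcopies
    return all_copies[targetrev]

# linear history 0 -> 1 -> 2; rev 1 copies a -> b, rev 2 copies b -> c
children = {0: [1], 1: [2], 2: []}
info = {1: (0, {'b': 'a'}, []), 2: (1, {'c': 'b'}, [])}
assert combine_copies([0, 1], children, 2, info.__getitem__) == {
    'b': 'a', 'c': 'a'}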
@@ -837,30 +856,26 @@
         return False
 
 
-def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
-    """reproduce copies from fromrev to rev in the dirstate
+def graftcopies(wctx, ctx, base):
+    """reproduce copies between base and ctx in the wctx
 
-    If skiprev is specified, it's a revision that should be used to
-    filter copy records. Any copies that occur between fromrev and
-    skiprev will not be duplicated, even if they appear in the set of
-    copies between fromrev and rev.
+    Unlike mergecopies(), this function will only consider copies between base
+    and ctx; it will ignore copies between base and wctx. Also unlike
+    mergecopies(), this function will apply copies to the working copy (instead
+    of just returning information about the copies). That makes it cheaper
+    (especially in the common case of base==ctx.p1()) and also useful when
+    experimental.copytrace=off.
+
+    merge.update() will have already marked most copies, but it will only
+    mark copies if it thinks the source files are related (see
+    merge._related()). It will also not mark copies if the file wasn't modified
+    on the local side. This function adds the copies that were "missed"
+    by merge.update().
     """
-    exclude = {}
-    ctraceconfig = repo.ui.config(b'experimental', b'copytrace')
-    bctrace = stringutil.parsebool(ctraceconfig)
-    if skiprev is not None and (
-        ctraceconfig == b'heuristics' or bctrace or bctrace is None
-    ):
-        # copytrace='off' skips this line, but not the entire function because
-        # the line below is O(size of the repo) during a rebase, while the rest
-        # of the function is much faster (and is required for carrying copy
-        # metadata across the rebase anyway).
-        exclude = pathcopies(repo[fromrev], repo[skiprev])
-    for dst, src in pycompat.iteritems(pathcopies(repo[fromrev], repo[rev])):
-        if dst in exclude:
-            continue
-        if dst in wctx:
-            wctx[dst].markcopied(src)
+    new_copies = pathcopies(base, ctx)
+    _filter(wctx.p1(), wctx, new_copies)
+    for dst, src in pycompat.iteritems(new_copies):
+        wctx[dst].markcopied(src)
 
 
 def computechangesetfilesadded(ctx):
@@ -989,6 +1004,102 @@
 
 
 def getsidedataadder(srcrepo, destrepo):
+    use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
+    if pycompat.iswindows or not use_w:
+        return _get_simple_sidedata_adder(srcrepo, destrepo)
+    else:
+        return _get_worker_sidedata_adder(srcrepo, destrepo)
+
+
+def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
+    """The function used by worker precomputing sidedata
+
+    It read an input queue containing revision numbers
+    It write in an output queue containing (rev, <sidedata-map>)
+
+    The `None` input value is used as a stop signal.
+
+    The `tokens` semaphore is user to avoid having too many unprocessed
+    entries. The workers needs to acquire one token before fetching a task.
+    They will be released by the consumer of the produced data.
+    """
+    tokens.acquire()
+    rev = revs_queue.get()
+    while rev is not None:
+        data = _getsidedata(srcrepo, rev)
+        sidedata_queue.put((rev, data))
+        tokens.acquire()
+        rev = revs_queue.get()
+    # processing of `None` is completed, release the token.
+    tokens.release()
+
+
+BUFF_PER_WORKER = 50
+
+
+def _get_worker_sidedata_adder(srcrepo, destrepo):
+    """The parallel version of the sidedata computation
+
+    This code spawns a pool of workers that precompute a buffer of sidedata
+    before we actually need it."""
+    # avoid circular import copies -> scmutil -> worker -> copies
+    from . import worker
+
+    nbworkers = worker._numworkers(srcrepo.ui)
+
+    tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
+    revsq = multiprocessing.Queue()
+    sidedataq = multiprocessing.Queue()
+
+    assert srcrepo.filtername is None
+    # queue all tasks beforehand; revision numbers are small and it makes
+    # synchronisation simpler
+    #
+    # Since the computation for each node can be quite expensive, the overhead
+    # of using a single queue is not relevant. In practice, most computations
+    # are fast, but some are very expensive and dominate all the other,
+    # smaller costs.
+    for r in srcrepo.changelog.revs():
+        revsq.put(r)
+    # queue the "no more tasks" markers
+    for i in range(nbworkers):
+        revsq.put(None)
+
+    allworkers = []
+    for i in range(nbworkers):
+        args = (srcrepo, revsq, sidedataq, tokens)
+        w = multiprocessing.Process(target=_sidedata_worker, args=args)
+        allworkers.append(w)
+        w.start()
+
+    # dictionary to store results for revisions higher than the one we are
+    # looking for. For example, if we need the sidedata map for 42 and 43 is
+    # received first, we shelve 43 for later use.
+    staging = {}
+
+    def sidedata_companion(revlog, rev):
+        sidedata = {}
+        if util.safehasattr(revlog, b'filteredrevs'):  # this is a changelog
+            # Was the data shelved previously?
+            sidedata = staging.pop(rev, None)
+            if sidedata is None:
+                # look at the queued results until we find the one we are
+                # looking for (shelving the others)
+                r, sidedata = sidedataq.get()
+                while r != rev:
+                    staging[r] = sidedata
+                    r, sidedata = sidedataq.get()
+            tokens.release()
+        return False, (), sidedata
+
+    return sidedata_companion
+
+
+def _get_simple_sidedata_adder(srcrepo, destrepo):
+    """The simple version of the sidedata computation
+
+    It just computes the sidedata in the same thread, on request"""
+
     def sidedatacompanion(revlog, rev):
         sidedata = {}
         if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
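The producer/consumer machinery added above is plain multiprocessing; a self-contained sketch of the same pattern follows (illustrative values only, with rev * rev standing in for the expensive sidedata computation):

import multiprocessing

def worker(inq, outq, tokens):
    # a token is acquired before each task, so at most the semaphore's
    # capacity of results can sit unconsumed in the output queue
    tokens.acquire()
    rev = inq.get()
    while rev is not None:
        outq.put((rev, rev * rev))
        tokens.acquire()
        rev = inq.get()
    tokens.release()  # the None stop marker also consumed a token

if __name__ == '__main__':
    nbworkers = 2
    tokens = multiprocessing.BoundedSemaphore(nbworkers * 50)
    inq, outq = multiprocessing.Queue(), multiprocessing.Queue()
    for rev in range(10):
        inq.put(rev)
    for _ in range(nbworkers):
        inq.put(None)  # one stop marker per worker
    procs = [
        multiprocessing.Process(target=worker, args=(inq, outq, tokens))
        for _ in range(nbworkers)
    ]
    for p in procs:
        p.start()
    staging = {}  # shelf for results that arrive out of order
    for wanted in range(10):
        data = staging.pop(wanted, None)
        if data is None:
            rev, data = outq.get()
            while rev != wanted:
                staging[rev] = data
                rev, data = outq.get()
        tokens.release()  # free a slot for the producers
    for p in procs:
        p.join()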
--- a/mercurial/crecord.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/crecord.py	Tue Jan 21 13:14:51 2020 -0500
@@ -102,7 +102,7 @@
         raise NotImplementedError(b"method must be implemented by subclass")
 
     def allchildren(self):
-        b"Return a list of all of the direct children of this node"
+        """Return a list of all of the direct children of this node"""
         raise NotImplementedError(b"method must be implemented by subclass")
 
     def nextsibling(self):
@@ -264,21 +264,23 @@
         return None
 
     def firstchild(self):
-        b"return the first child of this item, if one exists.  otherwise None."
+        """return the first child of this item, if one exists.  otherwise
+        None."""
         if len(self.hunks) > 0:
             return self.hunks[0]
         else:
             return None
 
     def lastchild(self):
-        b"return the last child of this item, if one exists.  otherwise None."
+        """return the last child of this item, if one exists.  otherwise
+        None."""
         if len(self.hunks) > 0:
             return self.hunks[-1]
         else:
             return None
 
     def allchildren(self):
-        b"return a list of all of the direct children of this node"
+        """return a list of all of the direct children of this node"""
         return self.hunks
 
     def __getattr__(self, name):
@@ -286,7 +288,7 @@
 
 
 class uihunkline(patchnode):
-    b"represents a changed line in a hunk"
+    """represents a changed line in a hunk"""
 
     def __init__(self, linetext, hunk):
         self.linetext = linetext
@@ -319,16 +321,18 @@
             return None
 
     def parentitem(self):
-        b"return the parent to the current item"
+        """return the parent to the current item"""
         return self.hunk
 
     def firstchild(self):
-        b"return the first child of this item, if one exists.  otherwise None."
+        """return the first child of this item, if one exists.  otherwise
+        None."""
         # hunk-lines don't have children
         return None
 
     def lastchild(self):
-        b"return the last child of this item, if one exists.  otherwise None."
+        """return the last child of this item, if one exists.  otherwise
+        None."""
         # hunk-lines don't have children
         return None
 
@@ -372,25 +376,27 @@
             return None
 
     def parentitem(self):
-        b"return the parent to the current item"
+        """return the parent to the current item"""
         return self.header
 
     def firstchild(self):
-        b"return the first child of this item, if one exists.  otherwise None."
+        """return the first child of this item, if one exists.  otherwise
+        None."""
         if len(self.changedlines) > 0:
             return self.changedlines[0]
         else:
             return None
 
     def lastchild(self):
-        b"return the last child of this item, if one exists.  otherwise None."
+        """return the last child of this item, if one exists.  otherwise
+        None."""
         if len(self.changedlines) > 0:
             return self.changedlines[-1]
         else:
             return None
 
     def allchildren(self):
-        b"return a list of all of the direct children of this node"
+        """return a list of all of the direct children of this node"""
         return self.changedlines
 
     def countchanges(self):
@@ -522,7 +528,7 @@
         return getattr(self._hunk, name)
 
     def __repr__(self):
-        return r'<hunk %r@%d>' % (self.filename(), self.fromline)
+        return '<hunk %r@%d>' % (self.filename(), self.fromline)
 
 
 def filterpatch(ui, chunks, chunkselector, operation=None):
@@ -569,7 +575,7 @@
     chunkselector = curseschunkselector(headerlist, ui, operation)
     # This is required for ncurses to display non-ASCII characters in
     # default user locale encoding correctly.  --immerrr
-    locale.setlocale(locale.LC_ALL, r'')
+    locale.setlocale(locale.LC_ALL, '')
     origsigtstp = sentinel = object()
     if util.safehasattr(signal, b'SIGTSTP'):
         origsigtstp = signal.getsignal(signal.SIGTSTP)
@@ -853,7 +859,7 @@
         self.currentselecteditem = currentitem
 
     def updatescroll(self):
-        b"scroll the screen to fully show the currently-selected"
+        """scroll the screen to fully show the currently-selected"""
         selstart = self.selecteditemstartline
         selend = self.selecteditemendline
 
@@ -871,7 +877,7 @@
             self.scrolllines(selstart - padstartbuffered)
 
     def scrolllines(self, numlines):
-        b"scroll the screen up (down) by numlines when numlines >0 (<0)."
+        """scroll the screen up (down) by numlines when numlines >0 (<0)."""
         self.firstlineofpadtoprint += numlines
         if self.firstlineofpadtoprint < 0:
             self.firstlineofpadtoprint = 0
@@ -973,7 +979,7 @@
                 )
 
     def toggleall(self):
-        b"toggle the applied flag of all items."
+        """toggle the applied flag of all items."""
         if self.waslasttoggleallapplied:  # then unapply them this time
             for item in self.headerlist:
                 if item.applied:
@@ -984,8 +990,19 @@
                     self.toggleapply(item)
         self.waslasttoggleallapplied = not self.waslasttoggleallapplied
 
+    def flipselections(self):
+        """
+        Flip all selections. Every selected line is unselected and vice
+        versa.
+        """
+        for header in self.headerlist:
+            for hunk in header.allchildren():
+                for line in hunk.allchildren():
+                    self.toggleapply(line)
+
     def toggleallbetween(self):
-        b"toggle applied on or off for all items in range [lastapplied,current]."
+        """toggle applied on or off for all items in range [lastapplied,
+        current]. """
         if (
             not self.lastapplieditem
             or self.currentselecteditem == self.lastapplieditem
@@ -1026,7 +1043,8 @@
             nextitem = nextitem.nextitem()
 
     def togglefolded(self, item=None, foldparent=False):
-        b"toggle folded flag of specified item (defaults to currently selected)"
+        """toggle folded flag of specified item (defaults to currently
+        selected)"""
         if item is None:
             item = self.currentselecteditem
         if foldparent or (isinstance(item, uiheader) and item.neverunfolded):
@@ -1320,7 +1338,7 @@
     def printhunklinesbefore(
         self, hunk, selected=False, towin=True, ignorefolding=False
     ):
-        b"includes start/end line indicator"
+        """includes start/end line indicator"""
         outstr = b""
         # where hunk is in list of siblings
         hunkindex = hunk.header.hunks.index(hunk)
@@ -1529,7 +1547,7 @@
         return numlines
 
     def sigwinchhandler(self, n, frame):
-        b"handle window resizing"
+        """handle window resizing"""
         try:
             curses.endwin()
             self.xscreensize, self.yscreensize = scmutil.termsize(self.ui)
@@ -1599,20 +1617,21 @@
         return colorpair
 
     def initcolorpair(self, *args, **kwargs):
-        b"same as getcolorpair."
+        """same as getcolorpair."""
         self.getcolorpair(*args, **kwargs)
 
     def helpwindow(self):
-        b"print a help window to the screen.  exit after any keypress."
+        """print a help window to the screen.  exit after any keypress."""
         helptext = _(
             """            [press any key to return to the patch-display]
 
-crecord allows you to interactively choose among the changes you have made,
-and confirm only those changes you select for further processing by the command
-you are running (commit/shelve/revert), after confirming the selected
-changes, the unselected changes are still present in your working copy, so you
-can use crecord multiple times to split large changes into smaller changesets.
-the following are valid keystrokes:
+The curses hunk selector allows you to interactively choose among the
+changes you have made, and confirm only those changes you select for
+further processing by the command you are running (such as commit,
+shelve, or revert). After confirming the selected changes, the
+unselected changes are still present in your working copy, so you can
+use the hunk selector multiple times to split large changes into
+smaller changesets. The following are valid keystrokes:
 
               x [space] : (un-)select item ([~]/[x] = partly/fully applied)
                 [enter] : (un-)select item and go to next item of same type
@@ -1629,7 +1648,7 @@
                  ctrl-l : scroll the selected line to the top of the screen
                       m : edit / resume editing the commit message
                       e : edit the currently selected hunk
-                      a : toggle amend mode, only with commit -i
+                      a : toggle all selections
                       c : confirm selected changes
                       r : review/edit and confirm selected changes
                       q : quit without confirming (no changes will be made)
@@ -1654,7 +1673,7 @@
             pass
 
     def commitMessageWindow(self):
-        b"Create a temporary commit message editing window on the screen."
+        """Create a temporary commit message editing window on the screen."""
 
         curses.raw()
         curses.def_prog_mode()
@@ -1704,7 +1723,8 @@
         self.recenterdisplayedarea()
 
     def confirmationwindow(self, windowtext):
-        b"display an informational window, then wait for and return a keypress."
+        """display an informational window, then wait for and return a
+        keypress."""
 
         confirmwin = curses.newwin(self.yscreensize, 0, 0, 0)
         try:
@@ -1747,32 +1767,6 @@
         else:
             return False
 
-    def toggleamend(self, opts, test):
-        """Toggle the amend flag.
-
-        When the amend flag is set, a commit will modify the most recently
-        committed changeset, instead of creating a new changeset.  Otherwise, a
-        new changeset will be created (the normal commit behavior).
-        """
-
-        if opts.get(b'amend') is None:
-            opts[b'amend'] = True
-            msg = _(
-                b"Amend option is turned on -- committing the currently "
-                b"selected changes will not create a new changeset, but "
-                b"instead update the most recently committed changeset.\n\n"
-                b"Press any key to continue."
-            )
-        elif opts.get(b'amend') is True:
-            opts[b'amend'] = None
-            msg = _(
-                b"Amend option is turned off -- committing the currently "
-                b"selected changes will create a new changeset.\n\n"
-                b"Press any key to continue."
-            )
-        if not test:
-            self.confirmationwindow(msg)
-
     def recenterdisplayedarea(self):
         """
         once we scrolled with pg up pg down we can be pointing outside of the
@@ -1904,7 +1898,7 @@
         elif keypressed in ["q"]:
             raise error.Abort(_(b'user quit'))
         elif keypressed in ['a']:
-            self.toggleamend(self.opts, test)
+            self.flipselections()
         elif keypressed in ["c"]:
             return True
         elif keypressed in ["r"]:
--- a/mercurial/dagparser.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/dagparser.py	Tue Jan 21 13:14:51 2020 -0500
@@ -168,7 +168,9 @@
     if not desc:
         return
 
-    wordchars = pycompat.bytestr(string.ascii_letters + string.digits)
+    wordchars = pycompat.bytestr(
+        string.ascii_letters + string.digits
+    )  # pytype: disable=wrong-arg-types
 
     labels = {}
     p1 = -1
@@ -177,7 +179,9 @@
     def resolve(ref):
         if not ref:
             return p1
-        elif ref[0] in pycompat.bytestr(string.digits):
+        elif ref[0] in pycompat.bytestr(
+            string.digits
+        ):  # pytype: disable=wrong-arg-types
             return r - int(ref)
         else:
             return labels[ref]
@@ -211,7 +215,9 @@
 
     c = nextch()
     while c != b'\0':
-        while c in pycompat.bytestr(string.whitespace):
+        while c in pycompat.bytestr(
+            string.whitespace
+        ):  # pytype: disable=wrong-arg-types
             c = nextch()
         if c == b'.':
             yield b'n', (r, [p1])
@@ -219,7 +225,9 @@
             r += 1
             c = nextch()
         elif c == b'+':
-            c, digs = nextrun(nextch(), pycompat.bytestr(string.digits))
+            c, digs = nextrun(
+                nextch(), pycompat.bytestr(string.digits)
+            )  # pytype: disable=wrong-arg-types
             n = int(digs)
             for i in pycompat.xrange(0, n):
                 yield b'n', (r, [p1])
--- a/mercurial/debugcommands.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/debugcommands.py	Tue Jan 21 13:14:51 2020 -0500
@@ -59,6 +59,7 @@
     merge as mergemod,
     obsolete,
     obsutil,
+    pathutil,
     phases,
     policy,
     pvec,
@@ -330,9 +331,9 @@
                     )
                 )
 
-        chunkdata = gen.changelogheader()
+        gen.changelogheader()
         showchunks(b"changelog")
-        chunkdata = gen.manifestheader()
+        gen.manifestheader()
         showchunks(b"manifest")
         for chunkdata in iter(gen.filelogheader, {}):
             fname = chunkdata[b'filename']
@@ -340,7 +341,7 @@
     else:
         if isinstance(gen, bundle2.unbundle20):
             raise error.Abort(_(b'use debugbundle2 for this file'))
-        chunkdata = gen.changelogheader()
+        gen.changelogheader()
         for deltadata in gen.deltaiter():
             node, p1, p2, cs, deltabase, delta, flags = deltadata
             ui.write(b"%s%s\n" % (indent_string, hex(node)))
@@ -393,7 +394,7 @@
     if not isinstance(gen, bundle2.unbundle20):
         raise error.Abort(_(b'not a bundle2 file'))
     ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
-    parttypes = opts.get(r'part_type', [])
+    parttypes = opts.get('part_type', [])
     for part in gen.iterparts():
         if parttypes and part.type not in parttypes:
             continue
@@ -480,8 +481,8 @@
             ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
             errors += 1
     if errors:
-        error = _(b".hg/dirstate inconsistent with current parent's manifest")
-        raise error.Abort(error)
+        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
+        raise error.Abort(errstr)
 
 
 @command(
@@ -492,7 +493,7 @@
 def debugcolor(ui, repo, **opts):
     """show available color, effects or style"""
     ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
-    if opts.get(r'style'):
+    if opts.get('style'):
         return _debugdisplaystyle(ui)
     else:
         return _debugdisplaycolor(ui)
@@ -573,8 +574,8 @@
 
     Otherwise, the changelog DAG of the current repo is emitted.
     """
-    spaces = opts.get(r'spaces')
-    dots = opts.get(r'dots')
+    spaces = opts.get('spaces')
+    dots = opts.get('dots')
     if file_:
         rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
         revs = set((int(r) for r in revs))
@@ -587,8 +588,8 @@
 
     elif repo:
         cl = repo.changelog
-        tags = opts.get(r'tags')
-        branches = opts.get(r'branches')
+        tags = opts.get('tags')
+        branches = opts.get('branches')
         if tags:
             labels = {}
             for l, n in repo.tags().items():
@@ -651,8 +652,8 @@
 )
 def debugdate(ui, date, range=None, **opts):
     """parse and display a date"""
-    if opts[r"extended"]:
-        d = dateutil.parsedate(date, util.extendeddateformats)
+    if opts["extended"]:
+        d = dateutil.parsedate(date, dateutil.extendeddateformats)
     else:
         d = dateutil.parsedate(date)
     ui.writenoi18n(b"internal: %d %d\n" % d)
@@ -861,10 +862,10 @@
 def debugstate(ui, repo, **opts):
     """show the contents of the current dirstate"""
 
-    nodates = not opts[r'dates']
-    if opts.get(r'nodates') is not None:
+    nodates = not opts['dates']
+    if opts.get('nodates') is not None:
         nodates = True
-    datesort = opts.get(r'datesort')
+    datesort = opts.get('datesort')
 
     if datesort:
         keyfunc = lambda x: (x[1][3], x[0])  # sort by mtime, then by filename
@@ -877,7 +878,7 @@
             timestr = b'set                 '
         else:
             timestr = time.strftime(
-                r"%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
+                "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
             )
             timestr = encoding.strtolocal(timestr)
         if ent[1] & 0o20000:
@@ -1028,7 +1029,12 @@
     fm = ui.formatter(b'debugextensions', opts)
     for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
         isinternal = extensions.ismoduleinternal(extmod)
-        extsource = pycompat.fsencode(extmod.__file__)
+        extsource = None
+
+        if util.safehasattr(extmod, '__file__'):
+            extsource = pycompat.fsencode(extmod.__file__)
+        elif getattr(sys, 'oxidized', False):
+            extsource = pycompat.sysexecutable
         if isinternal:
             exttestedwith = []  # never expose magic string to users
         else:
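The `__file__` guard above accommodates frozen builds; a hedged standalone sketch of the same fallback (module_source is a hypothetical helper, not part of the patch):

import os
import sys

def module_source(mod, executable):
    # PyOxidizer-style builds set sys.oxidized and may import modules from
    # memory, without a __file__; report the executable path in that case
    if hasattr(mod, '__file__'):
        return mod.__file__
    if getattr(sys, 'oxidized', False):
        return executable
    return None

print(module_source(os, sys.executable))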
@@ -1165,7 +1171,7 @@
         files.update(ctx.files())
         files.update(ctx.substate)
 
-    m = ctx.matchfileset(expr)
+    m = ctx.matchfileset(repo.getcwd(), expr)
     if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
         ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
     for f in sorted(files):
@@ -1298,11 +1304,11 @@
         raise error.Abort(b"getbundle() not supported by target repository")
     args = {}
     if common:
-        args[r'common'] = [bin(s) for s in common]
+        args['common'] = [bin(s) for s in common]
     if head:
-        args[r'heads'] = [bin(s) for s in head]
+        args['heads'] = [bin(s) for s in head]
     # TODO: get desired bundlecaps from command line.
-    args[r'bundlecaps'] = None
+    args['bundlecaps'] = None
     bundle = repo.getbundle(b'debug', **args)
 
     bundletype = opts.get(b'type', b'bzip2').lower()
@@ -1343,7 +1349,7 @@
                     ignored = nf
                     ignoredata = repo.dirstate._ignorefileandline(nf)
                 else:
-                    for p in util.finddirs(nf):
+                    for p in pathutil.finddirs(nf):
                         if ignore(p):
                             ignored = p
                             ignoredata = repo.dirstate._ignorefileandline(p)
@@ -1469,6 +1475,12 @@
     )
 
     # Python
+    pythonlib = None
+    if util.safehasattr(os, '__file__'):
+        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
+    elif getattr(sys, 'oxidized', False):
+        pythonlib = pycompat.sysexecutable
+
     fm.write(
         b'pythonexe',
         _(b"checking Python executable (%s)\n"),
@@ -1482,7 +1494,7 @@
     fm.write(
         b'pythonlib',
         _(b"checking Python lib (%s)...\n"),
-        os.path.dirname(pycompat.fsencode(os.__file__)),
+        pythonlib or _(b"unknown"),
     )
 
     security = set(sslutil.supportedprotocols)
@@ -1526,13 +1538,19 @@
     )
 
     # compiled modules
+    hgmodules = None
+    if util.safehasattr(sys.modules[__name__], '__file__'):
+        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
+    elif getattr(sys, 'oxidized', False):
+        hgmodules = pycompat.sysexecutable
+
     fm.write(
         b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
     )
     fm.write(
         b'hgmodules',
         _(b"checking installed modules (%s)...\n"),
-        os.path.dirname(pycompat.fsencode(__file__)),
+        hgmodules or _(b"unknown"),
     )
 
     rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
@@ -1543,7 +1561,7 @@
         err = None
         try:
             if cext:
-                from .cext import (
+                from .cext import (  # pytype: disable=import-error
                     base85,
                     bdiff,
                     mpatch,
@@ -1553,7 +1571,7 @@
                 # quiet pyflakes
                 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
             if rustext:
-                from .rustext import (
+                from .rustext import (  # pytype: disable=import-error
                     ancestor,
                     dirstate,
                 )
@@ -1775,21 +1793,21 @@
 
     """
 
-    if opts.get(r'force_lock'):
+    if opts.get('force_lock'):
         repo.svfs.unlink(b'lock')
-    if opts.get(r'force_wlock'):
+    if opts.get('force_wlock'):
         repo.vfs.unlink(b'wlock')
-    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
+    if opts.get('force_lock') or opts.get('force_wlock'):
         return 0
 
     locks = []
     try:
-        if opts.get(r'set_wlock'):
+        if opts.get('set_wlock'):
             try:
                 locks.append(repo.wlock(False))
             except error.LockHeld:
                 raise error.Abort(_(b'wlock is already held'))
-        if opts.get(r'set_lock'):
+        if opts.get('set_lock'):
             try:
                 locks.append(repo.lock(False))
             except error.LockHeld:
@@ -1871,7 +1889,7 @@
             )
             raise error.Abort(msg)
 
-    if opts.get(r'clear'):
+    if opts.get('clear'):
         with repo.wlock():
             cache = getcache()
             cache.clear(clear_persisted_data=True)
@@ -2265,7 +2283,7 @@
         if fixpaths:
             spec = spec.replace(pycompat.ossep, b'/')
         speclen = len(spec)
-        fullpaths = opts[r'full']
+        fullpaths = opts['full']
         files, dirs = set(), set()
         adddir, addfile = dirs.add, files.add
         for f, st in pycompat.iteritems(dirstate):
@@ -2283,11 +2301,11 @@
         return files, dirs
 
     acceptable = b''
-    if opts[r'normal']:
+    if opts['normal']:
         acceptable += b'nm'
-    if opts[r'added']:
+    if opts['added']:
         acceptable += b'a'
-    if opts[r'removed']:
+    if opts['removed']:
         acceptable += b'r'
     cwd = repo.getcwd()
     if not specs:
@@ -2526,7 +2544,7 @@
         dirstate = repo.dirstate
         changedfiles = None
         # See command doc for what minimal does.
-        if opts.get(r'minimal'):
+        if opts.get('minimal'):
             manifestfiles = set(ctx.manifest().keys())
             dirstatefiles = set(dirstate)
             manifestonly = manifestfiles - dirstatefiles
@@ -3147,13 +3165,13 @@
         ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
         sm = difflib.SequenceMatcher(None, arevs, brevs)
         for tag, alo, ahi, blo, bhi in sm.get_opcodes():
-            if tag in (r'delete', r'replace'):
+            if tag in ('delete', 'replace'):
                 for c in arevs[alo:ahi]:
                     ui.write(b'-%d\n' % c, label=b'diff.deleted')
-            if tag in (r'insert', r'replace'):
+            if tag in ('insert', 'replace'):
                 for c in brevs[blo:bhi]:
                     ui.write(b'+%d\n' % c, label=b'diff.inserted')
-            if tag == r'equal':
+            if tag == 'equal':
                 for c in arevs[alo:ahi]:
                     ui.write(b' %d\n' % c)
         return 1
@@ -3200,16 +3218,19 @@
         raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
 
     if opts[b'logiofd']:
-        # Line buffered because output is line based.
+        # Ideally we would be line buffered. But line buffering in binary
+        # mode isn't supported and emits a warning in Python 3.8+. Disabling
+        # buffering could have performance impacts. But since this isn't
+        # performance critical code, it should be fine.
         try:
-            logfh = os.fdopen(int(opts[b'logiofd']), r'ab', 1)
+            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
         except OSError as e:
             if e.errno != errno.ESPIPE:
                 raise
             # can't seek a pipe, so `ab` mode fails on py3
-            logfh = os.fdopen(int(opts[b'logiofd']), r'wb', 1)
+            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
     elif opts[b'logiofile']:
-        logfh = open(opts[b'logiofile'], b'ab', 1)
+        logfh = open(opts[b'logiofile'], b'ab', 0)
 
     s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
     s.serve_forever()
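A minimal sketch of the unbuffered binary handle the hunk above switches to (the log path here is hypothetical); buffering=1 on a binary stream is not honored and triggers a warning on Python 3.8+:

import os

fd = os.open('example-io.log', os.O_WRONLY | os.O_CREAT | os.O_APPEND)
logfh = os.fdopen(fd, 'ab', 0)  # buffering=0: every write reaches the fd
logfh.write(b'one request logged\n')
logfh.close()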
@@ -3391,7 +3412,7 @@
         ctx = repo[rev]
         ui.write(b'%s\n' % ctx2str(ctx))
         for succsset in obsutil.successorssets(
-            repo, ctx.node(), closest=opts[r'closest'], cache=cache
+            repo, ctx.node(), closest=opts['closest'], cache=cache
         ):
             if succsset:
                 ui.write(b'    ')
@@ -3421,15 +3442,15 @@
     Use --verbose to print the parsed tree.
     """
     revs = None
-    if opts[r'rev']:
+    if opts['rev']:
         if repo is None:
             raise error.RepoError(
                 _(b'there is no Mercurial repository here (.hg not found)')
             )
-        revs = scmutil.revrange(repo, opts[r'rev'])
+        revs = scmutil.revrange(repo, opts['rev'])
 
     props = {}
-    for d in opts[r'define']:
+    for d in opts['define']:
         try:
             k, v = (e.strip() for e in d.split(b'=', 1))
             if not k or k == b'ui':
@@ -3985,27 +4006,27 @@
 
         url, authinfo = u.authinfo()
         openerargs = {
-            r'useragent': b'Mercurial debugwireproto',
+            'useragent': b'Mercurial debugwireproto',
         }
 
         # Turn pipes/sockets into observers so we can log I/O.
         if ui.verbose:
             openerargs.update(
                 {
-                    r'loggingfh': ui,
-                    r'loggingname': b's',
-                    r'loggingopts': {r'logdata': True, r'logdataapis': False,},
+                    'loggingfh': ui,
+                    'loggingname': b's',
+                    'loggingopts': {'logdata': True, 'logdataapis': False,},
                 }
             )
 
         if ui.debugflag:
-            openerargs[r'loggingopts'][r'logdataapis'] = True
+            openerargs['loggingopts']['logdataapis'] = True
 
         # Don't send default headers when in raw mode. This allows us to
         # bypass most of the behavior of our URL handling code so we can
         # have near complete control over what's sent on the wire.
         if opts[b'peer'] == b'raw':
-            openerargs[r'sendaccept'] = False
+            openerargs['sendaccept'] = False
 
         opener = urlmod.opener(ui, authinfo, **openerargs)
 
@@ -4105,7 +4126,7 @@
             ui.status(_(b'sending %s command\n') % command)
 
             if b'PUSHFILE' in args:
-                with open(args[b'PUSHFILE'], r'rb') as fh:
+                with open(args[b'PUSHFILE'], 'rb') as fh:
                     del args[b'PUSHFILE']
                     res, output = peer._callpush(
                         command, fh, **pycompat.strkwargs(args)
@@ -4143,6 +4164,7 @@
                 _(b'sending batch with %d sub-commands\n')
                 % len(batchedcommands)
             )
+            assert peer is not None
             for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                 ui.status(
                     _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
@@ -4213,8 +4235,8 @@
                 getattr(e, 'read', lambda: None)()
                 continue
 
-            ct = res.headers.get(r'Content-Type')
-            if ct == r'application/mercurial-cbor':
+            ct = res.headers.get('Content-Type')
+            if ct == 'application/mercurial-cbor':
                 ui.write(
                     _(b'cbor> %s\n')
                     % stringutil.pprint(
@@ -4223,6 +4245,7 @@
                 )
 
         elif action == b'close':
+            assert peer is not None
             peer.close()
         elif action == b'readavailable':
             if not stdout or not stderr:
--- a/mercurial/default.d/mergetools.rc	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-# Some default global settings for common merge tools
-
-[merge-tools]
-kdiff3.args=--auto --L1 $labelbase --L2 $labellocal --L3 $labelother $base $local $other -o $output
-kdiff3.regkey=Software\KDiff3
-kdiff3.regkeyalt=Software\Wow6432Node\KDiff3
-kdiff3.regappend=\kdiff3.exe
-kdiff3.fixeol=True
-kdiff3.gui=True
-kdiff3.diffargs=--L1 $plabel1 --L2 $clabel $parent $child
-
-gvimdiff.args=--nofork -d -g -O $local $other $base
-gvimdiff.regkey=Software\Vim\GVim
-gvimdiff.regkeyalt=Software\Wow6432Node\Vim\GVim
-gvimdiff.regname=path
-gvimdiff.priority=-9
-gvimdiff.diffargs=--nofork -d -g -O $parent $child
-
-vimdiff.args=$local $other $base -c 'redraw | echomsg "hg merge conflict, type \":cq\" to abort vimdiff"'
-vimdiff.check=changed
-vimdiff.priority=-10
-
-merge.check=conflicts
-merge.priority=-100
-
-gpyfm.gui=True
-
-meld.gui=True
-meld.args=--label=$labellocal $local --label='merged' $base --label=$labelother $other -o $output --auto-merge
-meld.check=changed
-meld.diffargs=-a --label=$plabel1 $parent --label=$clabel $child
-
-tkdiff.args=$local $other -a $base -o $output
-tkdiff.gui=True
-tkdiff.priority=-8
-tkdiff.diffargs=-L $plabel1 $parent -L $clabel $child
-
-xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 $labellocal --title2 $labelbase --title3 $labelother --merged-filename $output --merge $local $base $other
-xxdiff.gui=True
-xxdiff.priority=-8
-xxdiff.diffargs=--title1 $plabel1 $parent --title2 $clabel $child
-
-diffmerge.regkey=Software\SourceGear\SourceGear DiffMerge\
-diffmerge.regkeyalt=Software\Wow6432Node\SourceGear\SourceGear DiffMerge\
-diffmerge.regname=Location
-diffmerge.priority=-7
-diffmerge.args=-nosplash -merge -title1=$labellocal -title2=merged -title3=$labelother $local $base $other -result=$output
-diffmerge.check=changed
-diffmerge.gui=True
-diffmerge.diffargs=--nosplash --title1=$plabel1 --title2=$clabel $parent $child
-
-p4merge.args=$base $local $other $output
-p4merge.regkey=Software\Perforce\Environment
-p4merge.regkeyalt=Software\Wow6432Node\Perforce\Environment
-p4merge.regname=P4INSTROOT
-p4merge.regappend=\p4merge.exe
-p4merge.gui=True
-p4merge.priority=-8
-p4merge.diffargs=$parent $child
-
-p4mergeosx.executable = /Applications/p4merge.app/Contents/MacOS/p4merge
-p4mergeosx.args = $base $local $other $output
-p4mergeosx.gui = True
-p4mergeosx.priority=-8
-p4mergeosx.diffargs=$parent $child
-
-tortoisemerge.args=/base:$base /mine:$local /theirs:$other /merged:$output
-tortoisemerge.regkey=Software\TortoiseSVN
-tortoisemerge.regkeyalt=Software\Wow6432Node\TortoiseSVN
-tortoisemerge.check=changed
-tortoisemerge.gui=True
-tortoisemerge.priority=-8
-tortoisemerge.diffargs=/base:$parent /mine:$child /basename:$plabel1 /minename:$clabel
-
-ecmerge.args=$base $local $other --mode=merge3 --title0=$labelbase --title1=$labellocal --title2=$labelother --to=$output
-ecmerge.regkey=Software\Elli\xc3\xa9 Computing\Merge
-ecmerge.regkeyalt=Software\Wow6432Node\Elli\xc3\xa9 Computing\Merge
-ecmerge.gui=True
-ecmerge.diffargs=$parent $child --mode=diff2 --title1=$plabel1 --title2=$clabel
-
-# editmerge is a small script shipped in contrib.
-# It needs this config otherwise it behaves the same as internal:local
-editmerge.args=$output
-editmerge.check=changed
-editmerge.premerge=keep
-
-filemerge.executable=/Developer/Applications/Utilities/FileMerge.app/Contents/MacOS/FileMerge
-filemerge.args=-left $other -right $local -ancestor $base -merge $output
-filemerge.gui=True
-
-filemergexcode.executable=/Applications/Xcode.app/Contents/Applications/FileMerge.app/Contents/MacOS/FileMerge
-filemergexcode.args=-left $other -right $local -ancestor $base -merge $output
-filemergexcode.gui=True
-
-; Windows version of Beyond Compare
-beyondcompare3.args=$local $other $base $output /ro /lefttitle=$labellocal /centertitle=$labelbase /righttitle=$labelother /automerge /reviewconflicts /solo
-beyondcompare3.regkey=Software\Scooter Software\Beyond Compare 3
-beyondcompare3.regname=ExePath
-beyondcompare3.gui=True
-beyondcompare3.priority=-2
-beyondcompare3.diffargs=/lro /lefttitle=$plabel1 /righttitle=$clabel /solo /expandall $parent $child
-
-; Linux version of Beyond Compare
-bcompare.args=$local $other $base -mergeoutput=$output -ro -lefttitle=$labellocal -centertitle=$labelbase -righttitle=$labelother -outputtitle=merged -automerge -reviewconflicts -solo
-bcompare.gui=True
-bcompare.priority=-1
-bcompare.diffargs=-lro -lefttitle=$plabel1 -righttitle=$clabel -solo -expandall $parent $child
-
-; OS X version of Beyond Compare
-bcomposx.executable = /Applications/Beyond Compare.app/Contents/MacOS/bcomp
-bcomposx.args=$local $other $base -mergeoutput=$output -ro -lefttitle=$labellocal -centertitle=$labelbase -righttitle=$labelother -outputtitle=merged -automerge -reviewconflicts -solo
-bcomposx.gui=True
-bcomposx.priority=-1
-bcomposx.diffargs=-lro -lefttitle=$plabel1 -righttitle=$clabel -solo -expandall $parent $child
-
-winmerge.args=/e /x /wl /ub /dl $labelother /dr $labellocal $other $local $output
-winmerge.regkey=Software\Thingamahoochie\WinMerge
-winmerge.regkeyalt=Software\Wow6432Node\Thingamahoochie\WinMerge\
-winmerge.regname=Executable
-winmerge.check=changed
-winmerge.gui=True
-winmerge.priority=-10
-winmerge.diffargs=/r /e /x /ub /wl /dl $plabel1 /dr $clabel $parent $child
-
-araxis.regkey=SOFTWARE\Classes\TypeLib\{46799e0a-7bd1-4330-911c-9660bb964ea2}\7.0\HELPDIR
-araxis.regappend=\ConsoleCompare.exe
-araxis.priority=-2
-araxis.args=/3 /a2 /wait /merge /title1:"Other" /title2:"Base" /title3:"Local :"$local $other $base $local $output
-araxis.checkconflict=True
-araxis.binary=True
-araxis.gui=True
-araxis.diffargs=/2 /wait /title1:$plabel1 /title2:$clabel $parent $child
-
-diffuse.priority=-3
-diffuse.args=$local $base $other
-diffuse.gui=True
-diffuse.diffargs=$parent $child
-
-UltraCompare.regkey=Software\Microsoft\Windows\CurrentVersion\App Paths\UC.exe
-UltraCompare.regkeyalt=Software\Wow6432Node\Microsoft\Windows\CurrentVersion\App Paths\UC.exe
-UltraCompare.args = $base $local $other -title1 base -title3 other
-UltraCompare.priority = -2
-UltraCompare.gui = True
-UltraCompare.binary = True
-UltraCompare.check = conflicts,changed
-UltraCompare.diffargs=$child $parent -title1 $clabel -title2 $plabel1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/defaultrc/mergetools.rc	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,146 @@
+# Some default global settings for common merge tools
+
+[merge-tools]
+kdiff3.args=--auto --L1 $labelbase --L2 $labellocal --L3 $labelother $base $local $other -o $output
+kdiff3.regkey=Software\KDiff3
+kdiff3.regkeyalt=Software\Wow6432Node\KDiff3
+kdiff3.regappend=\kdiff3.exe
+kdiff3.fixeol=True
+kdiff3.gui=True
+kdiff3.diffargs=--L1 $plabel1 --L2 $clabel $parent $child
+
+gvimdiff.args=--nofork -d -g -O $local $other $base
+gvimdiff.regkey=Software\Vim\GVim
+gvimdiff.regkeyalt=Software\Wow6432Node\Vim\GVim
+gvimdiff.regname=path
+gvimdiff.priority=-9
+gvimdiff.diffargs=--nofork -d -g -O $parent $child
+
+vimdiff.args=$local $other $base -c 'redraw | echomsg "hg merge conflict, type \":cq\" to abort vimdiff"'
+vimdiff.check=changed
+vimdiff.priority=-10
+
+merge.check=conflicts
+merge.priority=-100
+
+gpyfm.gui=True
+
+meld.gui=True
+meld.args=--label=$labellocal $local --label='merged' $base --label=$labelother $other -o $output --auto-merge
+meld.check=changed
+meld.diffargs=-a --label=$plabel1 $parent --label=$clabel $child
+
+tkdiff.args=$local $other -a $base -o $output
+tkdiff.gui=True
+tkdiff.priority=-8
+tkdiff.diffargs=-L $plabel1 $parent -L $clabel $child
+
+xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 $labellocal --title2 $labelbase --title3 $labelother --merged-filename $output --merge $local $base $other
+xxdiff.gui=True
+xxdiff.priority=-8
+xxdiff.diffargs=--title1 $plabel1 $parent --title2 $clabel $child
+
+diffmerge.regkey=Software\SourceGear\SourceGear DiffMerge\
+diffmerge.regkeyalt=Software\Wow6432Node\SourceGear\SourceGear DiffMerge\
+diffmerge.regname=Location
+diffmerge.priority=-7
+diffmerge.args=-nosplash -merge -title1=$labellocal -title2=merged -title3=$labelother $local $base $other -result=$output
+diffmerge.check=changed
+diffmerge.gui=True
+diffmerge.diffargs=--nosplash --title1=$plabel1 --title2=$clabel $parent $child
+
+p4merge.args=$base $local $other $output
+p4merge.regkey=Software\Perforce\Environment
+p4merge.regkeyalt=Software\Wow6432Node\Perforce\Environment
+p4merge.regname=P4INSTROOT
+p4merge.regappend=\p4merge.exe
+p4merge.gui=True
+p4merge.priority=-8
+p4merge.diffargs=$parent $child
+
+p4mergeosx.executable = /Applications/p4merge.app/Contents/MacOS/p4merge
+p4mergeosx.args = $base $local $other $output
+p4mergeosx.gui = True
+p4mergeosx.priority=-8
+p4mergeosx.diffargs=$parent $child
+
+tortoisemerge.args=/base:$base /mine:$local /theirs:$other /merged:$output
+tortoisemerge.regkey=Software\TortoiseSVN
+tortoisemerge.regkeyalt=Software\Wow6432Node\TortoiseSVN
+tortoisemerge.check=changed
+tortoisemerge.gui=True
+tortoisemerge.priority=-8
+tortoisemerge.diffargs=/base:$parent /mine:$child /basename:$plabel1 /minename:$clabel
+
+ecmerge.args=$base $local $other --mode=merge3 --title0=$labelbase --title1=$labellocal --title2=$labelother --to=$output
+ecmerge.regkey=Software\Elli\xc3\xa9 Computing\Merge
+ecmerge.regkeyalt=Software\Wow6432Node\Elli\xc3\xa9 Computing\Merge
+ecmerge.gui=True
+ecmerge.diffargs=$parent $child --mode=diff2 --title1=$plabel1 --title2=$clabel
+
+# editmerge is a small script shipped in contrib.
+# It needs this config otherwise it behaves the same as internal:local
+editmerge.args=$output
+editmerge.check=changed
+editmerge.premerge=keep
+
+filemerge.executable=/Developer/Applications/Utilities/FileMerge.app/Contents/MacOS/FileMerge
+filemerge.args=-left $other -right $local -ancestor $base -merge $output
+filemerge.gui=True
+
+filemergexcode.executable=/Applications/Xcode.app/Contents/Applications/FileMerge.app/Contents/MacOS/FileMerge
+filemergexcode.args=-left $other -right $local -ancestor $base -merge $output
+filemergexcode.gui=True
+
+; Windows version of Beyond Compare
+beyondcompare3.args=$local $other $base $output /ro /lefttitle=$labellocal /centertitle=$labelbase /righttitle=$labelother /automerge /reviewconflicts /solo
+beyondcompare3.regkey=Software\Scooter Software\Beyond Compare 3
+beyondcompare3.regname=ExePath
+beyondcompare3.gui=True
+beyondcompare3.priority=-2
+beyondcompare3.diffargs=/lro /lefttitle=$plabel1 /righttitle=$clabel /solo /expandall $parent $child
+
+; Linux version of Beyond Compare
+bcompare.args=$local $other $base -mergeoutput=$output -ro -lefttitle=$labellocal -centertitle=$labelbase -righttitle=$labelother -outputtitle=merged -automerge -reviewconflicts -solo
+bcompare.gui=True
+bcompare.priority=-1
+bcompare.diffargs=-lro -lefttitle=$plabel1 -righttitle=$clabel -solo -expandall $parent $child
+
+; OS X version of Beyond Compare
+bcomposx.executable = /Applications/Beyond Compare.app/Contents/MacOS/bcomp
+bcomposx.args=$local $other $base -mergeoutput=$output -ro -lefttitle=$labellocal -centertitle=$labelbase -righttitle=$labelother -outputtitle=merged -automerge -reviewconflicts -solo
+bcomposx.gui=True
+bcomposx.priority=-1
+bcomposx.diffargs=-lro -lefttitle=$plabel1 -righttitle=$clabel -solo -expandall $parent $child
+
+winmerge.args=/e /x /wl /ub /dl $labelother /dr $labellocal $other $local $output
+winmerge.regkey=Software\Thingamahoochie\WinMerge
+winmerge.regkeyalt=Software\Wow6432Node\Thingamahoochie\WinMerge\
+winmerge.regname=Executable
+winmerge.check=changed
+winmerge.gui=True
+winmerge.priority=-10
+winmerge.diffargs=/r /e /x /ub /wl /dl $plabel1 /dr $clabel $parent $child
+
+araxis.regkey=SOFTWARE\Classes\TypeLib\{46799e0a-7bd1-4330-911c-9660bb964ea2}\7.0\HELPDIR
+araxis.regappend=\ConsoleCompare.exe
+araxis.priority=-2
+araxis.args=/3 /a2 /wait /merge /title1:"Other" /title2:"Base" /title3:"Local :"$local $other $base $local $output
+araxis.checkconflict=True
+araxis.binary=True
+araxis.gui=True
+araxis.diffargs=/2 /wait /title1:$plabel1 /title2:$clabel $parent $child
+
+diffuse.priority=-3
+diffuse.args=$local $base $other
+diffuse.gui=True
+diffuse.diffargs=$parent $child
+
+UltraCompare.regkey=Software\Microsoft\Windows\CurrentVersion\App Paths\UC.exe
+UltraCompare.regkeyalt=Software\Wow6432Node\Microsoft\Windows\CurrentVersion\App Paths\UC.exe
+UltraCompare.args = $base $local $other -title1 base -title3 other
+UltraCompare.priority = -2
+UltraCompare.gui = True
+UltraCompare.binary = True
+UltraCompare.check = conflicts,changed
+UltraCompare.diffargs=$child $parent -title1 $clabel -title2 $plabel1
--- a/mercurial/dirstate.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/dirstate.py	Tue Jan 21 13:14:51 2020 -0500
@@ -36,8 +36,8 @@
     util as interfaceutil,
 )
 
-parsers = policy.importmod(r'parsers')
-rustmod = policy.importrust(r'dirstate')
+parsers = policy.importmod('parsers')
+rustmod = policy.importrust('dirstate')
 
 propertycache = util.propertycache
 filecache = scmutil.filecache
@@ -368,7 +368,7 @@
         rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
         check whether the dirstate has changed before rereading it.'''
 
-        for a in (r"_map", r"_branch", r"_ignore"):
+        for a in ("_map", "_branch", "_ignore"):
             if a in self.__dict__:
                 delattr(self, a)
         self._lastnormaltime = 0
@@ -404,7 +404,7 @@
                     _(b'directory %r already in dirstate') % pycompat.bytestr(f)
                 )
             # shadows
-            for d in util.finddirs(f):
+            for d in pathutil.finddirs(f):
                 if self._map.hastrackeddir(d):
                     break
                 entry = self._map.get(d)
@@ -603,19 +603,34 @@
     def rebuild(self, parent, allfiles, changedfiles=None):
         if changedfiles is None:
             # Rebuild entire dirstate
-            changedfiles = allfiles
+            to_lookup = allfiles
+            to_drop = []
             lastnormaltime = self._lastnormaltime
             self.clear()
             self._lastnormaltime = lastnormaltime
+        elif len(changedfiles) < 10:
+            # Avoid turning allfiles into a set, which can be expensive if it's
+            # large.
+            to_lookup = []
+            to_drop = []
+            for f in changedfiles:
+                if f in allfiles:
+                    to_lookup.append(f)
+                else:
+                    to_drop.append(f)
+        else:
+            changedfilesset = set(changedfiles)
+            to_lookup = changedfilesset & set(allfiles)
+            to_drop = changedfilesset - to_lookup
 
         if self._origpl is None:
             self._origpl = self._pl
         self._map.setparents(parent, nullid)
-        for f in changedfiles:
-            if f in allfiles:
-                self.normallookup(f)
-            else:
-                self.drop(f)
+
+        for f in to_lookup:
+            self.normallookup(f)
+        for f in to_drop:
+            self.drop(f)
 
         self._dirty = True
 
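The size cutoff above generalizes to a small reusable pattern; a sketch with illustrative data (split_changed is a hypothetical name):

def split_changed(changedfiles, allfiles):
    # below ~10 changed files, a few linear scans are cheaper than
    # materializing allfiles (potentially huge) as a set
    if len(changedfiles) < 10:
        to_lookup = [f for f in changedfiles if f in allfiles]
        to_drop = [f for f in changedfiles if f not in allfiles]
    else:
        changedset = set(changedfiles)
        to_lookup = changedset & set(allfiles)
        to_drop = changedset - to_lookup
    return to_lookup, to_drop

assert split_changed([b'a', b'zz'], [b'a', b'b', b'c']) == ([b'a'], [b'zz'])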
@@ -687,8 +702,7 @@
         delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
         if delaywrite > 0:
             # do we have any files to delay for?
-            items = pycompat.iteritems(self._map)
-            for f, e in items:
+            for f, e in pycompat.iteritems(self._map):
                 if e[0] == b'n' and e[3] == now:
                     import time  # to avoid useless import
 
@@ -700,12 +714,6 @@
                     time.sleep(end - clock)
                     now = end  # trust our estimate that the end is near now
                     break
-            # since the iterator is potentially not deleted,
-            # delete the iterator to release the reference for the Rust
-            # implementation.
-            # TODO make the Rust implementation behave like Python
-            # since this would not work with a non ref-counting GC.
-            del items
 
         self._map.write(st, now)
         self._lastnormaltime = 0
@@ -714,7 +722,7 @@
     def _dirignore(self, f):
         if self._ignore(f):
             return True
-        for p in util.finddirs(f):
+        for p in pathutil.finddirs(f):
             if self._ignore(p):
                 return True
         return False
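For reference, finddirs walks a slash-separated path up through its ancestors; a simplified model of the helper now imported from pathutil (not the exact implementation):

def finddirs(path):
    # yield each ancestor directory of a b'/'-separated path, deepest first
    pos = path.rfind(b'/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind(b'/', 0, pos)

assert list(finddirs(b'a/b/c.txt')) == [b'a/b', b'a']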
@@ -776,7 +784,6 @@
                 kind = _(b'directory')
             return _(b'unsupported file type (type is %s)') % kind
 
-        matchedir = match.explicitdir
         badfn = match.bad
         dmap = self._map
         lstat = os.lstat
@@ -830,8 +837,6 @@
                     if nf in dmap:
                         # file replaced by dir on disk but still in dirstate
                         results[nf] = None
-                    if matchedir:
-                        matchedir(nf)
                     foundadd((nf, ff))
                 elif kind == regkind or kind == lnkkind:
                     results[nf] = st
@@ -844,8 +849,6 @@
                     results[nf] = None
                 else:  # does it match a missing directory?
                     if self._map.hasdir(nf):
-                        if matchedir:
-                            matchedir(nf)
                         notfoundadd(nf)
                     else:
                         badfn(ff, encoding.strtolocal(inst.strerror))
@@ -946,6 +949,11 @@
 
         # step 1: find all explicit files
         results, work, dirsnotfound = self._walkexplicit(match, subrepos)
+        if matchtdir:
+            for d in work:
+                matchtdir(d[0])
+            for d in dirsnotfound:
+                matchtdir(d)
 
         skipstep3 = skipstep3 and not (work or dirsnotfound)
         work = [d for d in work if not dirignore(d[0])]
@@ -1075,6 +1083,46 @@
                     results[next(iv)] = st
         return results
 
+    def _rust_status(self, matcher, list_clean):
+        # Force Rayon (Rust parallelism library) to respect the number of
+        # workers. This is a temporary workaround until Rust code knows
+        # how to read the config file.
+        numcpus = self._ui.configint(b"worker", b"numcpus")
+        if numcpus is not None:
+            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
+
+        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
+        if not workers_enabled:
+            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
+
+        (
+            lookup,
+            modified,
+            added,
+            removed,
+            deleted,
+            unknown,
+            clean,
+        ) = rustmod.status(
+            self._map._rustmap,
+            matcher,
+            self._rootdir,
+            bool(list_clean),
+            self._lastnormaltime,
+            self._checkexec,
+        )
+
+        status = scmutil.status(
+            modified=modified,
+            added=added,
+            removed=removed,
+            deleted=deleted,
+            unknown=unknown,
+            ignored=[],
+            clean=clean,
+        )
+        return (lookup, status)
+
     def status(self, match, subrepos, ignored, clean, unknown):
         '''Determine the status of the working copy relative to the
         dirstate and return a pair of (unsure, status), where status is of type
@@ -1099,11 +1147,14 @@
         dmap.preload()
 
         use_rust = True
+
+        allowed_matchers = (matchmod.alwaysmatcher, matchmod.exactmatcher)
+
         if rustmod is None:
             use_rust = False
         elif subrepos:
             use_rust = False
-        if bool(listunknown):
+        elif bool(listunknown):
             # Pathauditor does not exist yet in Rust, unknown files
             # can't be trusted.
             use_rust = False
@@ -1111,60 +1162,26 @@
             # Rust has no ignore mechanism yet, so don't use Rust for
             # commands that need ignore.
             use_rust = False
-        elif not match.always():
+        elif not isinstance(match, allowed_matchers):
             # Matchers have yet to be implemented
             use_rust = False
 
         if use_rust:
-            # Force Rayon (Rust parallelism library) to respect the number of
-            # workers. This is a temporary workaround until Rust code knows
-            # how to read the config file.
-            numcpus = self._ui.configint(b"worker", b"numcpus")
-            if numcpus is not None:
-                encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
-
-            workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
-            if not workers_enabled:
-                encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
+            return self._rust_status(match, listclean)
 
-            (
-                lookup,
-                modified,
-                added,
-                removed,
-                deleted,
-                unknown,
-                clean,
-            ) = rustmod.status(
-                dmap._rustmap,
-                self._rootdir,
-                match.files(),
-                bool(listclean),
-                self._lastnormaltime,
-                self._checkexec,
-            )
-
-            status = scmutil.status(
-                modified=modified,
-                added=added,
-                removed=removed,
-                deleted=deleted,
-                unknown=unknown,
-                ignored=ignored,
-                clean=clean,
-            )
-            return (lookup, status)
+        def noop(f):
+            pass
 
         dcontains = dmap.__contains__
         dget = dmap.__getitem__
         ladd = lookup.append  # aka "unsure"
         madd = modified.append
         aadd = added.append
-        uadd = unknown.append
-        iadd = ignored.append
+        uadd = unknown.append if listunknown else noop
+        iadd = ignored.append if listignored else noop
         radd = removed.append
         dadd = deleted.append
-        cadd = clean.append
+        cadd = clean.append if listclean else noop
         mexact = match.exact
         dirignore = self._dirignore
         checkexec = self._checkexec
@@ -1418,9 +1435,9 @@
 
     def addfile(self, f, oldstate, state, mode, size, mtime):
         """Add a tracked file to the dirstate."""
-        if oldstate in b"?r" and r"_dirs" in self.__dict__:
+        if oldstate in b"?r" and "_dirs" in self.__dict__:
             self._dirs.addpath(f)
-        if oldstate == b"?" and r"_alldirs" in self.__dict__:
+        if oldstate == b"?" and "_alldirs" in self.__dict__:
             self._alldirs.addpath(f)
         self._map[f] = dirstatetuple(state, mode, size, mtime)
         if state != b'n' or mtime == -1:
@@ -1436,11 +1453,11 @@
         the file's previous state.  In the future, we should refactor this
         to be more explicit about what that state is.
         """
-        if oldstate not in b"?r" and r"_dirs" in self.__dict__:
+        if oldstate not in b"?r" and "_dirs" in self.__dict__:
             self._dirs.delpath(f)
-        if oldstate == b"?" and r"_alldirs" in self.__dict__:
+        if oldstate == b"?" and "_alldirs" in self.__dict__:
             self._alldirs.addpath(f)
-        if r"filefoldmap" in self.__dict__:
+        if "filefoldmap" in self.__dict__:
             normed = util.normcase(f)
             self.filefoldmap.pop(normed, None)
         self._map[f] = dirstatetuple(b'r', 0, size, 0)
@@ -1453,11 +1470,11 @@
         """
         exists = self._map.pop(f, None) is not None
         if exists:
-            if oldstate != b"r" and r"_dirs" in self.__dict__:
+            if oldstate != b"r" and "_dirs" in self.__dict__:
                 self._dirs.delpath(f)
-            if r"_alldirs" in self.__dict__:
+            if "_alldirs" in self.__dict__:
                 self._alldirs.delpath(f)
-        if r"filefoldmap" in self.__dict__:
+        if "filefoldmap" in self.__dict__:
             normed = util.normcase(f)
             self.filefoldmap.pop(normed, None)
         self.nonnormalset.discard(f)
@@ -1522,11 +1539,11 @@
 
     @propertycache
     def _dirs(self):
-        return util.dirs(self._map, b'r')
+        return pathutil.dirs(self._map, b'r')
 
     @propertycache
     def _alldirs(self):
-        return util.dirs(self._map)
+        return pathutil.dirs(self._map)
 
     def _opendirstatefile(self):
         fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
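An aside on the appender pattern introduced above: binding either
``list.append`` or a do-nothing function once, before the walk, avoids
re-testing ``listclean``/``listignored``/``listunknown`` for every file.
A minimal standalone sketch of the idea (the names here are illustrative,
not Mercurial APIs)::

    def walk(files, listclean):
        clean, modified = [], []

        def noop(f):
            pass

        # Resolve the conditional once, outside the hot loop.
        cadd = clean.append if listclean else noop
        madd = modified.append

        for f, is_clean in files:
            if is_clean:
                cadd(f)
            else:
                madd(f)
        return clean, modified

    walk([(b'a', True), (b'b', False)], listclean=False)  # -> ([], [b'b'])
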
--- a/mercurial/discovery.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/discovery.py	Tue Jan 21 13:14:51 2020 -0500
@@ -499,7 +499,7 @@
     repo = pushop.repo
     unfi = repo.unfiltered()
     tonode = unfi.changelog.node
-    torev = unfi.changelog.nodemap.get
+    torev = unfi.changelog.index.get_rev
     public = phases.public
     getphase = unfi._phasecache.phase
     ispublic = lambda r: getphase(unfi, r) == public
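The ``nodemap`` to ``index`` migration seen in this hunk recurs throughout
the changeset. Judging purely from the hunks in this patch, the old and
new lookup styles correspond roughly as follows (``cl`` being a
changelog)::

    # old style                  # new style
    cl.nodemap.get(node)         cl.index.get_rev(node)    # rev or None
    cl.nodemap[node]             cl.index.rev(node)        # raises for unknown nodes
    node in cl.nodemap           cl.index.has_node(node)   # membership test
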
--- a/mercurial/dispatch.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/dispatch.py	Tue Jan 21 13:14:51 2020 -0500
@@ -15,7 +15,6 @@
 import re
 import signal
 import sys
-import time
 import traceback
 
 
@@ -102,7 +101,7 @@
 
 
 def run():
-    b"run the command in sys.argv"
+    """run the command in sys.argv"""
     initstdio()
     with tracing.log('parse args into request'):
         req = request(pycompat.sysargv[1:])
@@ -115,6 +114,8 @@
 
     # In all cases we try to flush stdio streams.
     if util.safehasattr(req.ui, b'fout'):
+        assert req.ui is not None  # help pytype
+        assert req.ui.fout is not None  # help pytype
         try:
             req.ui.fout.flush()
         except IOError as e:
@@ -122,6 +123,8 @@
             status = -1
 
     if util.safehasattr(req.ui, b'ferr'):
+        assert req.ui is not None  # help pytype
+        assert req.ui.ferr is not None  # help pytype
         try:
             if err is not None and err.errno != errno.EPIPE:
                 req.ui.ferr.write(
@@ -658,10 +661,10 @@
 
     def __getattr__(self, name):
         adefaults = {
-            r'norepo': True,
-            r'intents': set(),
-            r'optionalrepo': False,
-            r'inferrepo': False,
+            'norepo': True,
+            'intents': set(),
+            'optionalrepo': False,
+            'inferrepo': False,
         }
         if name not in adefaults:
             raise AttributeError(name)
@@ -1036,8 +1039,8 @@
             def get_times():
                 t = os.times()
                 if t[4] == 0.0:
-                    # Windows leaves this as zero, so use time.clock()
-                    t = (t[0], t[1], t[2], t[3], time.clock())
+                    # Windows leaves this as zero, so use time.perf_counter()
+                    t = (t[0], t[1], t[2], t[3], util.timer())
                 return t
 
             s = get_times()
@@ -1108,6 +1111,7 @@
 
         repo = None
         cmdpats = args[:]
+        assert func is not None  # help out pytype
         if not func.norepo:
             # use the repo from the request only if we don't have -R
             if not rpath and not cwd:
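``time.clock()`` was removed in Python 3.8, which is what motivates the
``get_times()`` hunk above; the patch swaps in ``util.timer``, Mercurial's
portable timer. A stdlib-only sketch of the same fallback, assuming
``time.perf_counter()`` as the substitute timer::

    import os
    import time

    def get_times():
        t = os.times()
        if t[4] == 0.0:
            # Windows leaves elapsed time as zero; substitute a
            # monotonic high-resolution timer.
            t = (t[0], t[1], t[2], t[3], time.perf_counter())
        return t
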
--- a/mercurial/encoding.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/encoding.py	Tue Jan 21 13:14:51 2020 -0500
@@ -20,7 +20,24 @@
 
 from .pure import charencode as charencodepure
 
-charencode = policy.importmod(r'charencode')
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Any,
+        Callable,
+        List,
+        Text,
+        Type,
+        TypeVar,
+        Union,
+    )
+
+    # keep pyflakes happy
+    for t in (Any, Callable, List, Text, Type, Union):
+        assert t
+
+    _Tlocalstr = TypeVar('_Tlocalstr', bound='localstr')
+
+charencode = policy.importmod('charencode')
 
 isasciistr = charencode.isasciistr
 asciilower = charencode.asciilower
@@ -45,6 +62,7 @@
 
 
 def hfsignoreclean(s):
+    # type: (bytes) -> bytes
     """Remove codepoints ignored by HFS+ from s.
 
     >>> hfsignoreclean(u'.h\u200cg'.encode('utf-8'))
@@ -69,7 +87,7 @@
     # preferred encoding isn't known yet; use utf-8 to avoid unicode error
     # and recreate it once encoding is settled
     environ = dict(
-        (k.encode(r'utf-8'), v.encode(r'utf-8'))
+        (k.encode('utf-8'), v.encode('utf-8'))
         for k, v in os.environ.items()  # re-exports
     )
 
@@ -103,6 +121,13 @@
         s._utf8 = u
         return s
 
+    if pycompat.TYPE_CHECKING:
+        # pseudo implementation to help pytype see localstr() constructor
+        def __init__(self, u, l):
+            # type: (bytes, bytes) -> None
+            super(localstr, self).__init__(l)
+            self._utf8 = u
+
     def __hash__(self):
         return hash(self._utf8)  # avoid collisions in local string space
 
@@ -119,6 +144,7 @@
 
 
 def tolocal(s):
+    # type: (bytes) -> bytes
     """
     Convert a string from internal UTF-8 to local encoding
 
@@ -162,7 +188,7 @@
             if encoding == b'UTF-8':
                 # fast path
                 return s
-            r = u.encode(_sysstr(encoding), r"replace")
+            r = u.encode(_sysstr(encoding), "replace")
             if u == r.decode(_sysstr(encoding)):
                 # r is a safe, non-lossy encoding of s
                 return safelocalstr(r)
@@ -171,7 +197,7 @@
             # we should only get here if we're looking at an ancient changeset
             try:
                 u = s.decode(_sysstr(fallbackencoding))
-                r = u.encode(_sysstr(encoding), r"replace")
+                r = u.encode(_sysstr(encoding), "replace")
                 if u == r.decode(_sysstr(encoding)):
                     # r is a safe, non-lossy encoding of s
                     return safelocalstr(r)
@@ -179,12 +205,13 @@
             except UnicodeDecodeError:
                 u = s.decode("utf-8", "replace")  # last ditch
                 # can't round-trip
-                return u.encode(_sysstr(encoding), r"replace")
+                return u.encode(_sysstr(encoding), "replace")
     except LookupError as k:
         raise error.Abort(k, hint=b"please check your locale settings")
 
 
 def fromlocal(s):
+    # type: (bytes) -> bytes
     """
     Convert a string from the local character encoding to UTF-8
 
@@ -214,16 +241,19 @@
 
 
 def unitolocal(u):
+    # type: (Text) -> bytes
     """Convert a unicode string to a byte string of local encoding"""
     return tolocal(u.encode('utf-8'))
 
 
 def unifromlocal(s):
+    # type: (bytes) -> Text
     """Convert a byte string of local encoding to a unicode string"""
     return fromlocal(s).decode('utf-8')
 
 
 def unimethod(bytesfunc):
+    # type: (Callable[[Any], bytes]) -> Callable[[Any], Text]
     """Create a proxy method that forwards __unicode__() and __str__() of
     Python 3 to __bytes__()"""
 
@@ -241,15 +271,22 @@
     strfromlocal = unifromlocal
     strmethod = unimethod
 else:
-    strtolocal = pycompat.identity
-    strfromlocal = pycompat.identity
+
+    def strtolocal(s):
+        # type: (str) -> bytes
+        return s  # pytype: disable=bad-return-type
+
+    def strfromlocal(s):
+        # type: (bytes) -> str
+        return s  # pytype: disable=bad-return-type
+
     strmethod = pycompat.identity
 
 if not _nativeenviron:
     # now encoding and helper functions are available, recreate the environ
     # dict to be exported to other modules
     environ = dict(
-        (tolocal(k.encode(r'utf-8')), tolocal(v.encode(r'utf-8')))
+        (tolocal(k.encode('utf-8')), tolocal(v.encode('utf-8')))
         for k, v in os.environ.items()  # re-exports
     )
 
@@ -274,12 +311,14 @@
 
 
 def colwidth(s):
-    b"Find the column width of a string for display in the local encoding"
-    return ucolwidth(s.decode(_sysstr(encoding), r'replace'))
+    # type: (bytes) -> int
+    """Find the column width of a string for display in the local encoding"""
+    return ucolwidth(s.decode(_sysstr(encoding), 'replace'))
 
 
 def ucolwidth(d):
-    b"Find the column width of a Unicode string for display"
+    # type: (Text) -> int
+    """Find the column width of a Unicode string for display"""
     eaw = getattr(unicodedata, 'east_asian_width', None)
     if eaw is not None:
         return sum([eaw(c) in _wide and 2 or 1 for c in d])
@@ -287,15 +326,18 @@
 
 
 def getcols(s, start, c):
+    # type: (bytes, int, int) -> bytes
     '''Use colwidth to find a c-column substring of s starting at byte
     index start'''
     for x in pycompat.xrange(start + c, len(s)):
         t = s[start:x]
         if colwidth(t) == c:
             return t
+    raise ValueError('substring not found')
 
 
 def trim(s, width, ellipsis=b'', leftside=False):
+    # type: (bytes, int, bytes, bool) -> bytes
     """Trim string 's' to at most 'width' columns (including 'ellipsis').
 
     If 'leftside' is True, left side of string 's' is trimmed.
@@ -393,7 +435,8 @@
 
 
 def lower(s):
-    b"best-effort encoding-aware case-folding of local string s"
+    # type: (bytes) -> bytes
+    """best-effort encoding-aware case-folding of local string s"""
     try:
         return asciilower(s)
     except UnicodeDecodeError:
@@ -415,7 +458,8 @@
 
 
 def upper(s):
-    b"best-effort encoding-aware case-folding of local string s"
+    # type: (bytes) -> bytes
+    """best-effort encoding-aware case-folding of local string s"""
     try:
         return asciiupper(s)
     except UnicodeDecodeError:
@@ -423,6 +467,7 @@
 
 
 def upperfallback(s):
+    # type: (Any) -> Any
     try:
         if isinstance(s, localstr):
             u = s._utf8.decode("utf-8")
@@ -457,6 +502,7 @@
 
 
 def jsonescape(s, paranoid=False):
+    # type: (Any, Any) -> Any
     '''returns a string suitable for JSON
 
     JSON is problematic for us because it doesn't support non-Unicode
@@ -520,6 +566,7 @@
 
 
 def getutf8char(s, pos):
+    # type: (bytes, int) -> bytes
     '''get the next full utf-8 character in the given string, starting at pos
 
     Raises a UnicodeError if the given location does not start a valid
@@ -538,6 +585,7 @@
 
 
 def toutf8b(s):
+    # type: (bytes) -> bytes
     '''convert a local, possibly-binary string into UTF-8b
 
     This is intended as a generic method to preserve data when working
@@ -606,6 +654,7 @@
 
 
 def fromutf8b(s):
+    # type: (bytes) -> bytes
     '''Given a UTF-8b string, return a local, possibly-binary string.
 
     return the original binary string. This
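The annotations added throughout this module use PEP 484 comment syntax
rather than inline annotations, keeping the file importable on Python 2
while still giving pytype something to check. A minimal standalone sketch
of the arrangement (using a plain constant where Mercurial has
``pycompat.TYPE_CHECKING``)::

    from __future__ import absolute_import

    TYPE_CHECKING = False  # stand-in for pycompat.TYPE_CHECKING

    if TYPE_CHECKING:
        from typing import Text  # only imported by the type checker

    def unifromlocal(s):
        # type: (bytes) -> Text
        """Convert a byte string to a unicode string."""
        return s.decode('utf-8')
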
--- a/mercurial/error.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/error.py	Tue Jan 21 13:14:51 2020 -0500
@@ -34,7 +34,7 @@
     """
 
     def __init__(self, *args, **kw):
-        self.hint = kw.pop(r'hint', None)
+        self.hint = kw.pop('hint', None)
         super(Hint, self).__init__(*args, **kw)
 
 
--- a/mercurial/exchange.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/exchange.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,7 +8,6 @@
 from __future__ import absolute_import
 
 import collections
-import hashlib
 
 from .i18n import _
 from .node import (
@@ -40,7 +39,10 @@
     wireprototypes,
 )
 from .interfaces import repository
-from .utils import stringutil
+from .utils import (
+    hashutil,
+    stringutil,
+)
 
 urlerr = util.urlerr
 urlreq = util.urlreq
@@ -524,8 +526,8 @@
         # We can pick:
         # * missingheads part of common (::commonheads)
         common = self.outgoing.common
-        nm = self.repo.changelog.nodemap
-        cheads = [node for node in self.revs if nm[node] in common]
+        rev = self.repo.changelog.index.rev
+        cheads = [node for node in self.revs if rev(node) in common]
         # and
         # * commonheads parents on missing
         revset = unfi.set(
@@ -646,6 +648,8 @@
                 pushop.repo.checkpush(pushop)
                 _checkpublish(pushop)
                 _pushdiscovery(pushop)
+                if not pushop.force:
+                    _checksubrepostate(pushop)
                 if not _forcebundle1(pushop):
                     _pushbundle2(pushop)
                 _pushchangeset(pushop)
@@ -694,6 +698,17 @@
         step(pushop)
 
 
+def _checksubrepostate(pushop):
+    """Ensure all outgoing referenced subrepo revisions are present locally"""
+    for n in pushop.outgoing.missing:
+        ctx = pushop.repo[n]
+
+        if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
+            for subpath in sorted(ctx.substate):
+                sub = ctx.sub(subpath)
+                sub.verify(onpush=True)
+
+
 @pushdiscovery(b'changeset')
 def _pushdiscoverychangeset(pushop):
     """discover the changeset that need to be pushed"""
@@ -1851,7 +1866,7 @@
         pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
     )
     common, fetch, rheads = tmp
-    nm = pullop.repo.unfiltered().changelog.nodemap
+    has_node = pullop.repo.unfiltered().changelog.index.has_node
     if fetch and rheads:
         # If a remote head is filtered locally, put it back in common.
         #
@@ -1864,7 +1879,7 @@
         # but are not including a remote heads, we'll not be able to detect it,
         scommon = set(common)
         for n in rheads:
-            if n in nm:
+            if has_node(n):
                 if n not in scommon:
                     common.append(n)
         if set(rheads).issubset(set(common)):
@@ -2097,7 +2112,7 @@
         dheads = []
     unfi = pullop.repo.unfiltered()
     phase = unfi._phasecache.phase
-    rev = unfi.changelog.nodemap.get
+    rev = unfi.changelog.index.get_rev
     public = phases.public
     draft = phases.draft
 
@@ -2181,9 +2196,8 @@
     )
     if not user_includes:
         raise error.Abort(
-            _(b"{} configuration for user {} is empty").format(
-                _NARROWACL_SECTION, username
-            )
+            _(b"%s configuration for user %s is empty")
+            % (_NARROWACL_SECTION, username)
         )
 
     user_includes = [
@@ -2193,8 +2207,8 @@
         b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
     ]
 
-    req_includes = set(kwargs.get(r'includepats', []))
-    req_excludes = set(kwargs.get(r'excludepats', []))
+    req_includes = set(kwargs.get('includepats', []))
+    req_excludes = set(kwargs.get('excludepats', []))
 
     req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
         req_includes, req_excludes, user_includes, user_excludes
@@ -2202,18 +2216,17 @@
 
     if invalid_includes:
         raise error.Abort(
-            _(b"The following includes are not accessible for {}: {}").format(
-                username, invalid_includes
-            )
+            _(b"The following includes are not accessible for %s: %s")
+            % (username, stringutil.pprint(invalid_includes))
         )
 
     new_args = {}
     new_args.update(kwargs)
-    new_args[r'narrow'] = True
-    new_args[r'narrow_acl'] = True
-    new_args[r'includepats'] = req_includes
+    new_args['narrow'] = True
+    new_args['narrow_acl'] = True
+    new_args['includepats'] = req_includes
     if req_excludes:
-        new_args[r'excludepats'] = req_excludes
+        new_args['excludepats'] = req_excludes
 
     return new_args
 
@@ -2476,7 +2489,7 @@
     **kwargs
 ):
     """add a changegroup part to the requested bundle"""
-    if not kwargs.get(r'cg', True):
+    if not kwargs.get('cg', True) or not b2caps:
         return
 
     version = b'01'
@@ -2495,9 +2508,9 @@
     if not outgoing.missing:
         return
 
-    if kwargs.get(r'narrow', False):
-        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
-        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
+    if kwargs.get('narrow', False):
+        include = sorted(filter(bool, kwargs.get('includepats', [])))
+        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
         matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
     else:
         matcher = None
@@ -2519,8 +2532,8 @@
         part.addparam(b'exp-sidedata', b'1')
 
     if (
-        kwargs.get(r'narrow', False)
-        and kwargs.get(r'narrow_acl', False)
+        kwargs.get('narrow', False)
+        and kwargs.get('narrow_acl', False)
         and (include or exclude)
     ):
         # this is mandatory because otherwise ACL clients won't work
@@ -2536,9 +2549,9 @@
     bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
 ):
     """add a bookmark part to the requested bundle"""
-    if not kwargs.get(r'bookmarks', False):
+    if not kwargs.get('bookmarks', False):
         return
-    if b'bookmarks' not in b2caps:
+    if not b2caps or b'bookmarks' not in b2caps:
         raise error.Abort(_(b'no common bookmarks exchange method'))
     books = bookmod.listbinbookmarks(repo)
     data = bookmod.binaryencode(books)
@@ -2551,7 +2564,7 @@
     bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
 ):
     """add parts containing listkeys namespaces to the requested bundle"""
-    listkeys = kwargs.get(r'listkeys', ())
+    listkeys = kwargs.get('listkeys', ())
     for namespace in listkeys:
         part = bundler.newpart(b'listkeys')
         part.addparam(b'namespace', namespace)
@@ -2564,7 +2577,7 @@
     bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
 ):
     """add an obsolescence markers part to the requested bundle"""
-    if kwargs.get(r'obsmarkers', False):
+    if kwargs.get('obsmarkers', False):
         if heads is None:
             heads = repo.heads()
         subset = [c.node() for c in repo.set(b'::%ln', heads)]
@@ -2578,8 +2591,8 @@
     bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
 ):
     """add phase heads part to the requested bundle"""
-    if kwargs.get(r'phases', False):
-        if not b'heads' in b2caps.get(b'phases'):
+    if kwargs.get('phases', False):
+        if not b2caps or b'heads' not in b2caps.get(b'phases'):
             raise error.Abort(_(b'no common phases exchange method'))
         if heads is None:
             heads = repo.heads()
@@ -2643,7 +2656,7 @@
     # Don't send unless:
     # - changeset are being exchanged,
     # - the client supports it.
-    if not (kwargs.get(r'cg', True) and b'hgtagsfnodes' in b2caps):
+    if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
         return
 
     outgoing = _computeoutgoing(repo, heads, common)
@@ -2676,9 +2689,10 @@
     # - the client supports it.
     # - narrow bundle isn't in play (not currently compatible).
     if (
-        not kwargs.get(r'cg', True)
+        not kwargs.get('cg', True)
+        or not b2caps
         or b'rev-branch-cache' not in b2caps
-        or kwargs.get(r'narrow', False)
+        or kwargs.get('narrow', False)
         or repo.ui.has_section(_NARROWACL_SECTION)
     ):
         return
@@ -2693,7 +2707,7 @@
     Used by peer for unbundling.
     """
     heads = repo.heads()
-    heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
+    heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
     if not (
         their_heads == [b'force']
         or their_heads == heads
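The ``.format()`` to ``%`` rewrites in this file are not merely stylistic:
on Python 3, ``bytes`` objects have no ``format()`` method, while
``%``-interpolation of bytes has worked since Python 3.5 (PEP 461).
For example::

    >>> b"%s configuration for user %s is empty" % (b"narrow", b"alice")
    b'narrow configuration for user alice is empty'
    >>> b"{} configuration for user {}".format(b"narrow", b"alice")
    Traceback (most recent call last):
      ...
    AttributeError: 'bytes' object has no attribute 'format'
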
--- a/mercurial/exchangev2.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/exchangev2.py	Tue Jan 21 13:14:51 2020 -0500
@@ -291,9 +291,9 @@
     # See the comment in exchange._pulldiscoverychangegroup() for more.
 
     if fetch and remoteheads:
-        nodemap = repo.unfiltered().changelog.nodemap
+        has_node = repo.unfiltered().changelog.index.has_node
 
-        common |= {head for head in remoteheads if head in nodemap}
+        common |= {head for head in remoteheads if has_node(head)}
 
         if set(remoteheads).issubset(common):
             fetch = []
--- a/mercurial/extensions.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/extensions.py	Tue Jan 21 13:14:51 2020 -0500
@@ -92,7 +92,11 @@
         # module/__init__.py style
         d, f = os.path.split(path)
         fd, fpath, desc = imp.find_module(f, [d])
-        return imp.load_module(module_name, fd, fpath, desc)
+        # When https://github.com/python/typeshed/issues/3466 is fixed
+        # and in a pytype release we can drop this disable.
+        return imp.load_module(
+            module_name, fd, fpath, desc  # pytype: disable=wrong-arg-types
+        )
     else:
         try:
             return imp.load_source(module_name, path)
@@ -591,9 +595,7 @@
             break
 
     if currcls is object:
-        raise AttributeError(
-            r"type '%s' has no property '%s'" % (cls, propname)
-        )
+        raise AttributeError("type '%s' has no property '%s'" % (cls, propname))
 
 
 class wrappedfunction(object):
@@ -783,7 +785,7 @@
 def disabled():
     '''find disabled extensions from hgext. returns a dict of {name: desc}'''
     try:
-        from hgext import __index__
+        from hgext import __index__  # pytype: disable=import-error
 
         return dict(
             (name, gettext(desc))
@@ -809,7 +811,7 @@
 def disabledext(name):
     '''find a specific disabled extension from hgext. returns desc'''
     try:
-        from hgext import __index__
+        from hgext import __index__  # pytype: disable=import-error
 
         if name in _order:  # enabled
             return
@@ -836,7 +838,7 @@
                 continue
             if not isinstance(d.func, ast.Name):
                 continue
-            if d.func.id != r'command':
+            if d.func.id != 'command':
                 continue
             yield d
 
--- a/mercurial/fancyopts.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/fancyopts.py	Tue Jan 21 13:14:51 2020 -0500
@@ -205,7 +205,7 @@
     return parsedopts, parsedargs
 
 
-class customopt(object):
+class customopt(object):  # pytype: disable=ignored-metaclass
     """Manage defaults and mutations for any type of opt."""
 
     __metaclass__ = abc.ABCMeta
--- a/mercurial/filemerge.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/filemerge.py	Tue Jan 21 13:14:51 2020 -0500
@@ -119,7 +119,7 @@
         """
         return not (
             fctx.isabsent()
-            and fctx.ctx() == self.ctx()
+            and fctx.changectx() == self.changectx()
             and fctx.path() == self.path()
         )
 
@@ -279,7 +279,7 @@
 
 
 def _eoltype(data):
-    b"Guess the EOL type of a file"
+    """Guess the EOL type of a file"""
     if b'\0' in data:  # binary
         return None
     if b'\r\n' in data:  # Windows
@@ -292,7 +292,7 @@
 
 
 def _matcheol(file, back):
-    b"Convert EOL markers in a file to match origfile"
+    """Convert EOL markers in a file to match origfile"""
     tostyle = _eoltype(back.data())  # No repo.wread filters?
     if tostyle:
         data = util.readfile(file)
@@ -693,7 +693,7 @@
     ui.status(t.renderdefault(props))
 
 
-def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels):
     tool, toolpath, binary, symlink, scriptfn = toolconf
     uipathfn = scmutil.getuipathfn(repo)
     if fcd.isabsent() or fco.isabsent():
@@ -934,10 +934,10 @@
             name = os.path.join(tmproot, pre)
             if ext:
                 name += ext
-            f = open(name, r"wb")
+            f = open(name, "wb")
         else:
             fd, name = pycompat.mkstemp(prefix=pre + b'.', suffix=ext)
-            f = os.fdopen(fd, r"wb")
+            f = os.fdopen(fd, "wb")
         return f, name
 
     def tempfromcontext(prefix, ctx):
--- a/mercurial/fileset.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/fileset.py	Tue Jan 21 13:14:51 2020 -0500
@@ -520,29 +520,30 @@
 
 
 class matchctx(object):
-    def __init__(self, basectx, ctx, badfn=None):
+    def __init__(self, basectx, ctx, cwd, badfn=None):
         self._basectx = basectx
         self.ctx = ctx
         self._badfn = badfn
         self._match = None
         self._status = None
+        self.cwd = cwd
 
     def narrowed(self, match):
         """Create matchctx for a sub-tree narrowed by the given matcher"""
-        mctx = matchctx(self._basectx, self.ctx, self._badfn)
+        mctx = matchctx(self._basectx, self.ctx, self.cwd, self._badfn)
         mctx._match = match
         # leave wider status which we don't have to care
         mctx._status = self._status
         return mctx
 
     def switch(self, basectx, ctx):
-        mctx = matchctx(basectx, ctx, self._badfn)
+        mctx = matchctx(basectx, ctx, self.cwd, self._badfn)
         mctx._match = self._match
         return mctx
 
     def withstatus(self, keys):
         """Create matchctx which has precomputed status specified by the keys"""
-        mctx = matchctx(self._basectx, self.ctx, self._badfn)
+        mctx = matchctx(self._basectx, self.ctx, self.cwd, self._badfn)
         mctx._match = self._match
         mctx._buildstatus(keys)
         return mctx
@@ -560,7 +561,7 @@
         return self._status
 
     def matcher(self, patterns):
-        return self.ctx.match(patterns, badfn=self._badfn)
+        return self.ctx.match(patterns, badfn=self._badfn, cwd=self.cwd)
 
     def predicate(self, predfn, predrepr=None, cache=False):
         """Create a matcher to select files by predfn(filename)"""
@@ -617,12 +618,12 @@
         return matchmod.never(badfn=self._badfn)
 
 
-def match(ctx, expr, badfn=None):
+def match(ctx, cwd, expr, badfn=None):
     """Create a matcher for a single fileset expression"""
     tree = filesetlang.parse(expr)
     tree = filesetlang.analyze(tree)
     tree = filesetlang.optimize(tree)
-    mctx = matchctx(ctx.p1(), ctx, badfn=badfn)
+    mctx = matchctx(ctx.p1(), ctx, cwd, badfn=badfn)
     return getmatch(mctx, tree)
 
 
--- a/mercurial/graphmod.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/graphmod.py	Tue Jan 21 13:14:51 2020 -0500
@@ -20,6 +20,7 @@
 from __future__ import absolute_import
 
 from .node import nullrev
+from .thirdparty import attr
 from . import (
     dagop,
     pycompat,
@@ -192,7 +193,7 @@
 
 def asciiedges(type, char, state, rev, parents):
     """adds edge info to changelog DAG walk suitable for ascii()"""
-    seen = state[b'seen']
+    seen = state.seen
     if rev not in seen:
         seen.append(rev)
     nodeidx = seen.index(rev)
@@ -207,7 +208,7 @@
             knownparents.append(parent)
         else:
             newparents.append(parent)
-            state[b'edges'][parent] = state[b'styles'].get(ptype, b'|')
+            state.edges[parent] = state.styles.get(ptype, b'|')
 
     ncols = len(seen)
     width = 1 + ncols * 2
@@ -240,7 +241,7 @@
     if nmorecols > 0:
         width += 2
     # remove current node from edge characters, no longer needed
-    state[b'edges'].pop(rev, None)
+    state.edges.pop(rev, None)
     yield (type, char, width, (nodeidx, edges, ncols, nmorecols))
 
 
@@ -322,7 +323,7 @@
     while edgechars and edgechars[-1] is None:
         edgechars.pop()
     shift_size = max((edgechars.count(None) * 2) - 1, 0)
-    minlines = 3 if not state[b'graphshorten'] else 2
+    minlines = 3 if not state.graphshorten else 2
     while len(lines) < minlines + shift_size:
         lines.append(extra[:])
 
@@ -344,7 +345,7 @@
                 positions[i] = max(pos, targets[i])
                 line[pos] = b'/' if pos > targets[i] else extra[toshift[i]]
 
-    map = {1: b'|', 2: b'~'} if not state[b'graphshorten'] else {1: b'~'}
+    map = {1: b'|', 2: b'~'} if not state.graphshorten else {1: b'~'}
     for i, line in enumerate(lines):
         if None not in line:
             continue
@@ -357,16 +358,16 @@
         seen.remove(parent)
 
 
-def asciistate():
-    """returns the initial value for the "state" argument to ascii()"""
-    return {
-        b'seen': [],
-        b'edges': {},
-        b'lastcoldiff': 0,
-        b'lastindex': 0,
-        b'styles': EDGES.copy(),
-        b'graphshorten': False,
-    }
+@attr.s
+class asciistate(object):
+    """State of ascii() graph rendering"""
+
+    seen = attr.ib(init=False, default=attr.Factory(list))
+    edges = attr.ib(init=False, default=attr.Factory(dict))
+    lastcoldiff = attr.ib(init=False, default=0)
+    lastindex = attr.ib(init=False, default=0)
+    styles = attr.ib(init=False, default=attr.Factory(EDGES.copy))
+    graphshorten = attr.ib(init=False, default=False)
 
 
 def outputgraph(ui, graph):
@@ -409,7 +410,7 @@
     idx, edges, ncols, coldiff = coldata
     assert -2 < coldiff < 2
 
-    edgemap, seen = state[b'edges'], state[b'seen']
+    edgemap, seen = state.edges, state.seen
     # Be tolerant of history issues; make sure we have at least ncols + coldiff
     # elements to work with. See test-glog.t for broken history test cases.
     echars = [c for p in seen for c in (edgemap.get(p, b'|'), b' ')]
@@ -452,10 +453,10 @@
         _getnodelineedgestail(
             echars,
             idx,
-            state[b'lastindex'],
+            state.lastindex,
             ncols,
             coldiff,
-            state[b'lastcoldiff'],
+            state.lastcoldiff,
             fix_nodeline_tail,
         )
     )
@@ -485,7 +486,7 @@
 
     # If 'graphshorten' config, only draw shift_interline
     # when there is any non vertical flow in graph.
-    if state[b'graphshorten']:
+    if state.graphshorten:
         if any(c in br'\/' for c in shift_interline if c):
             lines.append(shift_interline)
     # Else, no 'graphshorten' config so draw shift_interline.
@@ -512,5 +513,5 @@
     outputgraph(ui, zip(lines, text))
 
     # ... and start over
-    state[b'lastcoldiff'] = coldiff
-    state[b'lastindex'] = idx
+    state.lastcoldiff = coldiff
+    state.lastindex = idx
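The ``asciistate`` rewrite above trades a ``dict`` of byte-string keys for
an attrs-style class, so state access becomes attribute lookup that pytype
can verify. A standalone sketch of the same construct, assuming the PyPI
``attrs`` package in place of Mercurial's vendored ``thirdparty.attr``::

    import attr

    @attr.s
    class asciistate(object):
        """State of ascii() graph rendering"""

        seen = attr.ib(init=False, default=attr.Factory(list))
        edges = attr.ib(init=False, default=attr.Factory(dict))
        lastcoldiff = attr.ib(init=False, default=0)
        lastindex = attr.ib(init=False, default=0)
        graphshorten = attr.ib(init=False, default=False)

    state = asciistate()
    state.seen.append(42)      # was: state[b'seen'].append(42)
    assert state.lastindex == 0
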
--- a/mercurial/hbisect.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hbisect.py	Tue Jan 21 13:14:51 2020 -0500
@@ -11,6 +11,7 @@
 from __future__ import absolute_import
 
 import collections
+import contextlib
 
 from .i18n import _
 from .node import (
@@ -180,6 +181,15 @@
         raise error.Abort(_(b'cannot bisect (no known bad revisions)'))
 
 
+@contextlib.contextmanager
+def restore_state(repo, state, node):
+    try:
+        yield
+    finally:
+        state[b'current'] = [node]
+        save_state(repo, state)
+
+
 def get(repo, status):
     """
     Return a list of revision(s) that match the given status:
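The new ``restore_state()`` context manager above guarantees that the
bisect state records the current node even if the wrapped block raises.
A generic sketch of the same try/finally shape, with a plain dict standing
in for the on-disk state::

    import contextlib

    @contextlib.contextmanager
    def restore_state(state, node):
        try:
            yield
        finally:
            # Runs on success and on error alike.
            state['current'] = [node]

    state = {}
    with restore_state(state, 'deadbeef'):
        pass  # work that may raise
    assert state['current'] == ['deadbeef']
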
--- a/mercurial/help.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/help.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,7 +8,6 @@
 from __future__ import absolute_import
 
 import itertools
-import os
 import re
 import textwrap
 
@@ -36,7 +35,10 @@
     util,
 )
 from .hgweb import webcommands
-from .utils import compression
+from .utils import (
+    compression,
+    resourceutil,
+)
 
 _exclkeywords = {
     b"(ADVANCED)",
@@ -311,11 +313,11 @@
     """Return a delayed loader for help/topic.txt."""
 
     def loader(ui):
-        docdir = os.path.join(util.datapath, b'help')
+        package = b'mercurial.helptext'
         if subdir:
-            docdir = os.path.join(docdir, subdir)
-        path = os.path.join(docdir, topic + b".txt")
-        doc = gettext(util.readfile(path))
+            package += b'.' + subdir
+        with resourceutil.open_resource(package, topic + b'.txt') as fp:
+            doc = gettext(fp.read())
         for rewriter in helphooks.get(topic, []):
             doc = rewriter(ui, topic, doc)
         return doc
@@ -805,7 +807,7 @@
                     appendcmds(catfns)
 
         ex = opts.get
-        anyopts = ex(r'keyword') or not (ex(r'command') or ex(r'extension'))
+        anyopts = ex('keyword') or not (ex('command') or ex('extension'))
         if not name and anyopts:
             exts = listexts(
                 _(b'enabled extensions:'),
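The loader rewrite above switches help topics from filesystem paths to
package resources, so they still resolve inside frozen builds and zip
installs. Mercurial's ``resourceutil`` wraps this portably; on Python 3.7+
alone, the stdlib offers an analogous call, a sketch of which follows
(``mercurial.helptext`` is the package this patch introduces)::

    import importlib.resources

    def load_topic(package, topic):
        """Read a help topic shipped as package data."""
        with importlib.resources.open_binary(package, topic + '.txt') as fp:
            return fp.read()

    # e.g. load_topic('mercurial.helptext', 'bundlespec')
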
--- a/mercurial/help/bundlespec.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-Mercurial supports generating standalone "bundle" files that hold repository
-data. These "bundles" are typically saved locally and used later or exchanged
-between different repositories, possibly on different machines. Example
-commands using bundles are :hg:`bundle` and :hg:`unbundle`.
-
-Generation of bundle files is controlled by a "bundle specification"
-("bundlespec") string. This string tells the bundle generation process how
-to create the bundle.
-
-A "bundlespec" string is composed of the following elements:
-
-type
-    A string denoting the bundle format to use.
-
-compression
-    Denotes the compression engine to use when compressing the raw bundle data.
-
-parameters
-    Arbitrary key-value parameters to further control bundle generation.
-
-A "bundlespec" string has the following formats:
-
-<type>
-    The literal bundle format string is used.
-
-<compression>-<type>
-    The compression engine and format are delimited by a hyphen (``-``).
-
-Optional parameters follow the ``<type>``. Parameters are URI escaped
-``key=value`` pairs. Each pair is delimited by a semicolon (``;``). The
-first parameter begins after a ``;`` immediately following the ``<type>``
-value.
-
-Available Types
-===============
-
-The following bundle <type> strings are available:
-
-v1
-    Produces a legacy "changegroup" version 1 bundle.
-
-    This format is compatible with nearly all Mercurial clients because it is
-    the oldest. However, it has some limitations, which is why it is no longer
-    the default for new repositories.
-
-    ``v1`` bundles can be used with modern repositories using the "generaldelta"
-    storage format. However, it may take longer to produce the bundle and the
-    resulting bundle may be significantly larger than a ``v2`` bundle.
-
-    ``v1`` bundles can only use the ``gzip``, ``bzip2``, and ``none`` compression
-    formats.
-
-v2
-    Produces a version 2 bundle.
-
-    Version 2 bundles are an extensible format that can store additional
-    repository data (such as bookmarks and phases information) and they can
-    store data more efficiently, resulting in smaller bundles.
-
-    Version 2 bundles can also use modern compression engines, such as
-    ``zstd``, making them faster to compress and often smaller.
-
-Available Compression Engines
-=============================
-
-The following bundle <compression> engines can be used:
-
-.. bundlecompressionmarker
-
-Examples
-========
-
-``v2``
-    Produce a ``v2`` bundle using default options, including compression.
-
-``none-v1``
-    Produce a ``v1`` bundle with no compression.
-
-``zstd-v2``
-    Produce a ``v2`` bundle with zstandard compression using default
-    settings.
-
-``zstd-v1``
-    This errors because ``zstd`` is not supported for ``v1`` types.
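Since the grammar just described is simple, a toy parser makes the format
concrete. This is an illustration only, not Mercurial's actual
implementation, and it skips the URI-unescaping of parameter values::

    def parsebundlespec(spec):
        """Split a bundlespec into (compression, type, params)."""
        params = {}
        if ';' in spec:
            spec, rest = spec.split(';', 1)
            for pair in rest.split(';'):
                key, _, value = pair.partition('=')
                params[key] = value
        if '-' in spec:
            compression, bundletype = spec.split('-', 1)
        else:
            compression, bundletype = None, spec
        return compression, bundletype, params

    assert parsebundlespec('zstd-v2') == ('zstd', 'v2', {})
    assert parsebundlespec('none-v1') == ('none', 'v1', {})
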
--- a/mercurial/help/color.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,149 +0,0 @@
-Mercurial colorizes output from several commands.
-
-For example, the diff command shows additions in green and deletions
-in red, while the status command shows modified files in magenta. Many
-other commands have analogous colors. It is possible to customize
-these colors.
-
-To enable color (default) whenever possible use::
-
-  [ui]
-  color = yes
-
-To disable color use::
-
-  [ui]
-  color = no
-
-See :hg:`help config.ui.color` for details.
-
-.. container:: windows
-
-  The default pager on Windows does not support color, so enabling the pager
-  will effectively disable color.  See :hg:`help config.ui.paginate` to disable
-  the pager.  Alternatively, MSYS and Cygwin shells provide `less` as a pager,
-  which can be configured to support ANSI color mode.  Windows 10 natively
-  supports ANSI color mode.
-
-Mode
-====
-
-Mercurial can use various systems to display color. The supported modes are
-``ansi``, ``win32``, and ``terminfo``.  See :hg:`help config.color` for details
-about how to control the mode.
-
-Effects
-=======
-
-Other effects in addition to color, like bold and underlined text, are
-also available. By default, the terminfo database is used to find the
-terminal codes used to change color and effect.  If terminfo is not
-available, then effects are rendered with the ECMA-48 SGR control
-function (aka ANSI escape codes).
-
-The available effects in terminfo mode are 'blink', 'bold', 'dim',
-'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
-ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
-'underline'.  How each is rendered depends on the terminal emulator.
-Some may not be available for a given terminal type, and will be
-silently ignored.
-
-If the terminfo entry for your terminal is missing codes for an effect
-or has the wrong codes, you can add or override those codes in your
-configuration::
-
-  [color]
-  terminfo.dim = \E[2m
-
-where '\E' is substituted with an escape character.
-
-Labels
-======
-
-Text receives color effects depending on the labels that it has. Many
-default Mercurial commands emit labelled text. You can also define
-your own labels in templates using the label function, see :hg:`help
-templates`. A single portion of text may have more than one label. In
-that case, effects given to the last label will override any other
-effects. This includes the special "none" effect, which nullifies
-other effects.
-
-Labels are normally invisible. In order to see these labels and their
-position in the text, use the global --color=debug option. The same
-anchor text may be associated with multiple labels, e.g.
-
-  [log.changeset changeset.secret|changeset:   22611:6f0a53c8f587]
-
-The following are the default effects for some default labels. Default
-effects may be overridden from your configuration file::
-
-  [color]
-  status.modified = blue bold underline red_background
-  status.added = green bold
-  status.removed = red bold blue_background
-  status.deleted = cyan bold underline
-  status.unknown = magenta bold underline
-  status.ignored = black bold
-
-  # 'none' turns off all effects
-  status.clean = none
-  status.copied = none
-
-  qseries.applied = blue bold underline
-  qseries.unapplied = black bold
-  qseries.missing = red bold
-
-  diff.diffline = bold
-  diff.extended = cyan bold
-  diff.file_a = red bold
-  diff.file_b = green bold
-  diff.hunk = magenta
-  diff.deleted = red
-  diff.inserted = green
-  diff.changed = white
-  diff.tab =
-  diff.trailingwhitespace = bold red_background
-
-  # Blank so it inherits the style of the surrounding label
-  changeset.public =
-  changeset.draft =
-  changeset.secret =
-
-  resolve.unresolved = red bold
-  resolve.resolved = green bold
-
-  bookmarks.active = green
-
-  branches.active = none
-  branches.closed = black bold
-  branches.current = green
-  branches.inactive = none
-
-  tags.normal = green
-  tags.local = black bold
-
-  rebase.rebased = blue
-  rebase.remaining = red bold
-
-  shelve.age = cyan
-  shelve.newest = green bold
-  shelve.name = blue bold
-
-  histedit.remaining = red bold
-
-Custom colors
-=============
-
-Because there are only eight standard colors, Mercurial allows you
-to define color names for other color slots which might be available
-for your terminal type, assuming terminfo mode.  For instance::
-
-  color.brightblue = 12
-  color.pink = 207
-  color.orange = 202
-
-to set 'brightblue' to color slot 12 (useful for 16 color terminals
-that have brighter colors defined in the upper eight) and, 'pink' and
-'orange' to colors in 256-color xterm's default color cube.  These
-defined colors may then be used as any of the pre-defined eight,
-including appending '_background' to set the background to that color.
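For reference, the ECMA-48 SGR fallback mentioned under "Effects" above
boils down to short escape sequences. An illustrative helper (SGR code 1
is bold, 31 selects red, 0 resets)::

    def sgr(text, *codes):
        """Wrap text in ECMA-48 SGR escape sequences."""
        prefix = ';'.join(str(c) for c in codes)
        return '\x1b[%sm%s\x1b[0m' % (prefix, text)

    print(sgr('removed line', 1, 31))  # bold red, akin to diff.file_a above
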
--- a/mercurial/help/common.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-.. Common link and substitution definitions.
-
-.. |hg(1)| replace:: **hg**\ (1)
-.. _hg(1): hg.1.html
-.. |hgrc(5)| replace:: **hgrc**\ (5)
-.. _hgrc(5): hgrc.5.html
-.. |hgignore(5)| replace:: **hgignore**\ (5)
-.. _hgignore(5): hgignore.5.html
--- a/mercurial/help/config.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2870 +0,0 @@
-The Mercurial system uses a set of configuration files to control
-aspects of its behavior.
-
-Troubleshooting
-===============
-
-If you're having problems with your configuration,
-:hg:`config --debug` can help you understand what is introducing
-a setting into your environment.
-
-See :hg:`help config.syntax` and :hg:`help config.files`
-for information about how and where to override things.
-
-Structure
-=========
-
-The configuration files use a simple ini-file format. A configuration
-file consists of sections, led by a ``[section]`` header and followed
-by ``name = value`` entries::
-
-  [ui]
-  username = Firstname Lastname <firstname.lastname@example.net>
-  verbose = True
-
-The above entries will be referred to as ``ui.username`` and
-``ui.verbose``, respectively. See :hg:`help config.syntax`.
-
-Files
-=====
-
-Mercurial reads configuration data from several files, if they exist.
-These files do not exist by default and you will have to create the
-appropriate configuration files yourself:
-
-Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
-
-Global configuration like the username setting is typically put into:
-
-.. container:: windows
-
-  - ``%USERPROFILE%\mercurial.ini`` (on Windows)
-
-.. container:: unix.plan9
-
-  - ``$HOME/.hgrc`` (on Unix, Plan9)
-
-The names of these files depend on the system on which Mercurial is
-installed. ``*.rc`` files from a single directory are read in
-alphabetical order, later ones overriding earlier ones. Where multiple
-paths are given below, settings from earlier paths override later
-ones.
-
-.. container:: verbose.unix
-
-  On Unix, the following files are consulted:
-
-  - ``<repo>/.hg/hgrc`` (per-repository)
-  - ``$HOME/.hgrc`` (per-user)
-  - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
-  - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
-  - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
-  - ``/etc/mercurial/hgrc`` (per-system)
-  - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
-  - ``<internal>/default.d/*.rc`` (defaults)
-
-.. container:: verbose.windows
-
-  On Windows, the following files are consulted:
-
-  - ``<repo>/.hg/hgrc`` (per-repository)
-  - ``%USERPROFILE%\.hgrc`` (per-user)
-  - ``%USERPROFILE%\Mercurial.ini`` (per-user)
-  - ``%HOME%\.hgrc`` (per-user)
-  - ``%HOME%\Mercurial.ini`` (per-user)
-  - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-installation)
-  - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
-  - ``<install-dir>\Mercurial.ini`` (per-installation)
-  - ``<internal>/default.d/*.rc`` (defaults)
-
-  .. note::
-
-   The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
-   is used when running 32-bit Python on 64-bit Windows.
-
-.. container:: windows
-
-  On Windows 9x, ``%HOME%`` is replaced by ``%APPDATA%``.
-
-.. container:: verbose.plan9
-
-  On Plan9, the following files are consulted:
-
-  - ``<repo>/.hg/hgrc`` (per-repository)
-  - ``$home/lib/hgrc`` (per-user)
-  - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
-  - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
-  - ``/lib/mercurial/hgrc`` (per-system)
-  - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
-  - ``<internal>/default.d/*.rc`` (defaults)
-
-Per-repository configuration options only apply in a
-particular repository. This file is not version-controlled, and
-will not get transferred during a "clone" operation. Options in
-this file override options in all other configuration files.
-
-.. container:: unix.plan9
-
-  On Plan 9 and Unix, most of this file will be ignored if it doesn't
-  belong to a trusted user or to a trusted group. See
-  :hg:`help config.trusted` for more details.
-
-Per-user configuration file(s) are for the user running Mercurial.  Options
-in these files apply to all Mercurial commands executed by this user in any
-directory. Options in these files override per-system and per-installation
-options.
-
-Per-installation configuration files are searched for in the
-directory where Mercurial is installed. ``<install-root>`` is the
-parent directory of the **hg** executable (or symlink) being run.
-
-.. container:: unix.plan9
-
-  For example, if installed in ``/shared/tools/bin/hg``, Mercurial
-  will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
-  files apply to all Mercurial commands executed by any user in any
-  directory.
-
-Per-installation configuration files are for the system on
-which Mercurial is running. Options in these files apply to all
-Mercurial commands executed by any user in any directory. Registry
-keys contain PATH-like strings, every part of which must reference
-a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
-be read.  Mercurial checks each of these locations in the specified
-order until one or more configuration files are detected.
-
-Per-system configuration files are for the system on which Mercurial
-is running. Options in these files apply to all Mercurial commands
-executed by any user in any directory. Options in these files
-override per-installation options.
-
-Mercurial comes with some default configuration. The default configuration
-files are installed with Mercurial and will be overwritten on upgrades. Default
-configuration files should never be edited by users or administrators but can
-be overridden in other configuration files. So far the directory only contains
-merge tool configuration but packagers can also put other default configuration
-there.
-
-Syntax
-======
-
-A configuration file consists of sections, led by a ``[section]`` header
-and followed by ``name = value`` entries (sometimes called
-``configuration keys``)::
-
-    [spam]
-    eggs=ham
-    green=
-       eggs
-
-Each line contains one entry. If the lines that follow are indented,
-they are treated as continuations of that entry. Leading whitespace is
-removed from values. Empty lines are skipped. Lines beginning with
-``#`` or ``;`` are ignored and may be used to provide comments.
-
-Configuration keys can be set multiple times, in which case Mercurial
-will use the value that was configured last. As an example::
-
-    [spam]
-    eggs=large
-    ham=serrano
-    eggs=small
-
-This would set the configuration key named ``eggs`` to ``small``.
-
-It is also possible to define a section multiple times. A section can
-be redefined on the same and/or on different configuration files. For
-example::
-
-    [foo]
-    eggs=large
-    ham=serrano
-    eggs=small
-
-    [bar]
-    eggs=ham
-    green=
-       eggs
-
-    [foo]
-    ham=prosciutto
-    eggs=medium
-    bread=toasted
-
-This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
-of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
-respectively. As you can see, the only thing that matters is the last
-value that was set for each of the configuration keys.
-
-If a configuration key is set multiple times in different
-configuration files the final value will depend on the order in which
-the different configuration files are read, with settings from earlier
-paths overriding later ones as described on the ``Files`` section
-above.
-
-A line of the form ``%include file`` will include ``file`` into the
-current configuration file. The inclusion is recursive, which means
-that included files can include other files. Filenames are relative to
-the configuration file in which the ``%include`` directive is found.
-Environment variables and ``~user`` constructs are expanded in
-``file``. This lets you do something like::
-
-  %include ~/.hgrc.d/$HOST.rc
-
-to include a different configuration file on each computer you use.
-
-A line with ``%unset name`` will remove ``name`` from the current
-section, if it has been set previously.
-
-The values are either free-form text strings, lists of text strings,
-or Boolean values. Boolean values can be set to true using any of "1",
-"yes", "true", or "on" and to false using "0", "no", "false", or "off"
-(all case insensitive).
-
-List values are separated by whitespace or comma, except when values are
-placed in double quotation marks::
-
-  allow_read = "John Doe, PhD", brian, betty
-
-Quotation marks can be escaped by prefixing them with a backslash. Only
-quotation marks at the beginning of a word are counted as quotations
-(e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
-
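The rules above (last value wins, ``%unset`` removes a key, ``#``/``;``
comments) are easy to see in a toy reader. This sketch is illustrative
only; Mercurial's real parser additionally handles ``%include``, indented
continuation lines, and quoting::

    def readconfig(text, sections=None):
        if sections is None:
            sections = {}
        section = None
        for line in text.splitlines():
            line = line.strip()
            if not line or line.startswith(('#', ';')):
                continue
            if line.startswith('[') and line.endswith(']'):
                section = sections.setdefault(line[1:-1], {})
            elif line.startswith('%unset ') and section is not None:
                section.pop(line[len('%unset '):].strip(), None)
            elif '=' in line and section is not None:
                name, value = line.split('=', 1)
                section[name.strip()] = value.strip()
        return sections

    cfg = readconfig("[spam]\neggs=large\nham=serrano\neggs=small")
    assert cfg['spam']['eggs'] == 'small'
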
-Sections
-========
-
-This section describes the different sections that may appear in a
-Mercurial configuration file, the purpose of each section, its possible
-keys, and their possible values.
-
-``alias``
----------
-
-Defines command aliases.
-
-Aliases allow you to define your own commands in terms of other
-commands (or aliases), optionally including arguments. Positional
-arguments in the form of ``$1``, ``$2``, etc. in the alias definition
-are expanded by Mercurial before execution. Positional arguments not
-already used by ``$N`` in the definition are put at the end of the
-command to be executed.
-
-Alias definitions consist of lines of the form::
-
-    <alias> = <command> [<argument>]...
-
-For example, this definition::
-
-    latest = log --limit 5
-
-creates a new command ``latest`` that shows only the five most recent
-changesets. You can define subsequent aliases using earlier ones::
-
-    stable5 = latest -b stable
-
-.. note::
-
-   It is possible to create aliases with the same names as
-   existing commands, which will then override the original
-   definitions. This is almost always a bad idea!
-
-An alias can start with an exclamation point (``!``) to make it a
-shell alias. A shell alias is executed with the shell and will let you
-run arbitrary commands. As an example, ::
-
-   echo = !echo $@
-
-will let you do ``hg echo foo`` to have ``foo`` printed in your
-terminal. A better example might be::
-
-   purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
-
-which will make ``hg purge`` delete all unknown files in the
-repository in the same manner as the purge extension.
-
-Positional arguments like ``$1``, ``$2``, etc. in the alias definition
-expand to the command arguments. Unmatched arguments are
-removed. ``$0`` expands to the alias name and ``$@`` expands to all
-arguments separated by a space. ``"$@"`` (with quotes) expands to all
-arguments quoted individually and separated by a space. These expansions
-happen before the command is passed to the shell.
-
-Shell aliases are executed in an environment where ``$HG`` expands to
-the path of the Mercurial that was used to execute the alias. This is
-useful when you want to call further Mercurial commands in a shell
-alias, as was done above for the purge alias. In addition,
-``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
-echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
-
-.. note::
-
-   Some global configuration options such as ``-R`` are
-   processed before shell aliases and will thus not be passed to
-   aliases.
-
-
-``annotate``
-------------
-
-Settings used when displaying file annotations. All values are
-Booleans and default to False. See :hg:`help config.diff` for
-related options for the diff command.
-
-``ignorews``
-    Ignore white space when comparing lines.
-
-``ignorewseol``
-    Ignore white space at the end of a line when comparing lines.
-
-``ignorewsamount``
-    Ignore changes in the amount of white space.
-
-``ignoreblanklines``
-    Ignore changes whose lines are all blank.
-
-
-``auth``
---------
-
-Authentication credentials and other authentication-like configuration
-for HTTP connections. This section allows you to store usernames and
-passwords for use when logging *into* HTTP servers. See
-:hg:`help config.web` if you want to configure *who* can login to
-your HTTP server.
-
-The following options apply to all hosts.
-
-``cookiefile``
-    Path to a file containing HTTP cookie lines. Cookies matching a
-    host will be sent automatically.
-
-    The file format uses the Mozilla cookies.txt format, which defines cookies
-    on their own lines. Each line contains 7 fields delimited by the tab
-    character (domain, is_domain_cookie, path, is_secure, expires, name,
-    value). For more info, do an Internet search for "Netscape cookies.txt
-    format."
-
-    Note: the cookies parser does not handle port numbers on domains. You
-    will need to remove ports from the domain for the cookie to be recognized.
-    This could result in a cookie being disclosed to an unwanted server.
-
-    The cookies file is read-only.
-
-Other options in this section are grouped by name and have the following
-format::
-
-    <name>.<argument> = <value>
-
-where ``<name>`` is used to group arguments into authentication
-entries. Example::
-
-    foo.prefix = hg.intevation.de/mercurial
-    foo.username = foo
-    foo.password = bar
-    foo.schemes = http https
-
-    bar.prefix = secure.example.org
-    bar.key = path/to/file.key
-    bar.cert = path/to/file.cert
-    bar.schemes = https
-
-Supported arguments:
-
-``prefix``
-    Either ``*`` or a URI prefix with or without the scheme part.
-    The authentication entry with the longest matching prefix is used
-    (where ``*`` matches everything and counts as a match of length
-    1). If the prefix doesn't include a scheme, the match is performed
-    against the URI with its scheme stripped as well, and the schemes
-    argument, q.v., is then consulted.
-
-``username``
-    Optional. Username to authenticate with. If not given, and the
-    remote site requires basic or digest authentication, the user will
-    be prompted for it. Environment variables are expanded in the
-    username letting you do ``foo.username = $USER``. If the URI
-    includes a username, only ``[auth]`` entries with a matching
-    username or without a username will be considered.
-
-``password``
-    Optional. Password to authenticate with. If not given, and the
-    remote site requires basic or digest authentication, the user
-    will be prompted for it.
-
-``key``
-    Optional. PEM encoded client certificate key file. Environment
-    variables are expanded in the filename.
-
-``cert``
-    Optional. PEM encoded client certificate chain file. Environment
-    variables are expanded in the filename.
-
-``schemes``
-    Optional. Space separated list of URI schemes to use this
-    authentication entry with. Only used if the prefix doesn't include
-    a scheme. Supported schemes are http and https. They will match
-    static-http and static-https respectively, as well.
-    (default: https)
-
-If no suitable authentication entry is found, the user is prompted
-for credentials as usual if required by the remote.
-
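The prefix-matching rule described above (longest match wins; ``*``
matches anything with length 1) can be sketched in a few lines. This
illustrates the selection rule only; the real lookup also strips schemes
and consults the ``schemes`` and ``username`` arguments::

    def pickauthentry(entries, uri):
        """Return the name of the best-matching [auth] group, if any."""
        best, bestlen = None, 0
        for name, prefix in entries.items():
            if prefix == '*':
                matchlen = 1
            elif uri.startswith(prefix):
                matchlen = len(prefix)
            else:
                continue
            if matchlen > bestlen:
                best, bestlen = name, matchlen
        return best

    entries = {'foo': 'hg.intevation.de/mercurial', 'fallback': '*'}
    assert pickauthentry(entries, 'hg.intevation.de/mercurial/repo') == 'foo'
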
-``color``
----------
-
-Configure the Mercurial color mode. For details about how to define your custom
-effect and style see :hg:`help color`.
-
-``mode``
-    String: control the method used to output color. One of ``auto``, ``ansi``,
-    ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
-    use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
-    terminal. Any invalid value will disable color.
-
-``pagermode``
-    String: optional override of ``color.mode`` used with pager.
-
-    On some systems, terminfo mode may cause problems when using
-    color with ``less -R`` as a pager program. less with the -R option
-    will only display ECMA-48 color codes, and terminfo mode may sometimes
-    emit codes that less doesn't understand. You can work around this by
-    either using ansi mode (or auto mode), or by using less -r (which will
-    pass through all terminal control codes, not just color control
-    codes).
-
-    On some systems (such as MSYS in Windows), the terminal may support
-    a different color mode than the pager program.
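-
-For example, a sketch of the workaround described above, keeping terminfo
-mode for direct output while falling back to ANSI codes under the pager::
-
-    [color]
-    mode = terminfo
-    pagermode = ansi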
-
-``commands``
-------------
-
-``commit.post-status``
-    Show status of files in the working directory after successful commit.
-    (default: False)
-
-``push.require-revs``
-    Require that revisions to push be specified using one or more mechanisms such as
-    specifying them positionally on the command line, using ``-r``, ``-b``,
-    and/or ``-B`` on the command line, or using ``paths.<path>:pushrev`` in the
-    configuration. If this is enabled and revisions are not specified, the
-    command aborts.
-    (default: False)
-
-``resolve.confirm``
-    Confirm before performing action if no filename is passed.
-    (default: False)
-
-``resolve.explicit-re-merge``
-    Require uses of ``hg resolve`` to specify which action it should perform,
-    instead of re-merging files by default.
-    (default: False)
-
-``resolve.mark-check``
-    Determines what level of checking :hg:`resolve --mark` will perform before
-    marking files as resolved. Valid values are ``none``, ``warn``, and
-    ``abort``. ``warn`` will output a warning listing the file(s) that still
-    have conflict markers in them, but will still mark everything resolved.
-    ``abort`` will output the same warning but will not mark things as resolved.
-    If --all is passed and this is set to ``abort``, only a warning will be
-    shown (an error will not be raised).
-    (default: ``none``)
-
-``status.relative``
-    Make paths in :hg:`status` output relative to the current directory.
-    (default: False)
-
-``status.terse``
-    Default value for the --terse flag, which condenses status output.
-    (default: empty)
-
-``update.check``
-    Determines what level of checking :hg:`update` will perform before moving
-    to a destination revision. Valid values are ``abort``, ``none``,
-    ``linear``, and ``noconflict``. ``abort`` always fails if the working
-    directory has uncommitted changes. ``none`` performs no checking, and may
-    result in a merge with uncommitted changes. ``linear`` allows any update
-    as long as it follows a straight line in the revision history, and may
-    trigger a merge with uncommitted changes. ``noconflict`` will allow any
-    update which would not trigger a merge with uncommitted changes, if any
-    are present.
-    (default: ``linear``)
-
-``update.requiredest``
-    Require that the user pass a destination when running :hg:`update`.
-    For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
-    will be disallowed.
-    (default: False)
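-
-For example, a minimal sketch combining some of the options above::
-
-    [commands]
-    commit.post-status = True
-    push.require-revs = True
-    update.check = noconflict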
-
-``committemplate``
-------------------
-
-``changeset``
-    String: configuration in this section is used as the template to
-    customize the text shown in the editor when committing.
-
-In addition to the pre-defined template keywords, the commit-log-specific
-keywords below can be used for customization:
-
-``extramsg``
-    String: Extra message (typically 'Leave message empty to abort
-    commit.'). This may be changed by some commands or extensions.
-
-For example, the template configuration below shows the same text as
-is shown by default::
-
-    [committemplate]
-    changeset = {desc}\n\n
-        HG: Enter commit message.  Lines beginning with 'HG:' are removed.
-        HG: {extramsg}
-        HG: --
-        HG: user: {author}\n{ifeq(p2rev, "-1", "",
-       "HG: branch merge\n")
-       }HG: branch '{branch}'\n{if(activebookmark,
-       "HG: bookmark '{activebookmark}'\n")   }{subrepos %
-       "HG: subrepo {subrepo}\n"              }{file_adds %
-       "HG: added {file}\n"                   }{file_mods %
-       "HG: changed {file}\n"                 }{file_dels %
-       "HG: removed {file}\n"                 }{if(files, "",
-       "HG: no files changed\n")}
-
-``diff()``
-    String: show the diff (see :hg:`help templates` for detail)
-
-Sometimes it is helpful to show the diff of the changeset in the editor without
-having to prefix 'HG: ' to each line so that highlighting works correctly. For
-this, Mercurial provides a special string which will ignore everything below
-it::
-
-     HG: ------------------------ >8 ------------------------
-
-For example, the template configuration below will show the diff below the
-extra message::
-
-    [committemplate]
-    changeset = {desc}\n\n
-        HG: Enter commit message.  Lines beginning with 'HG:' are removed.
-        HG: {extramsg}
-        HG: ------------------------ >8 ------------------------
-        HG: Do not touch the line above.
-        HG: Everything below will be removed.
-        {diff()}
-
-.. note::
-
-   For some problematic encodings (see :hg:`help win32mbcs` for
-   detail), this customization should be configured carefully, to
-   avoid showing broken characters.
-
-   For example, if a multibyte character ending with backslash (0x5c) is
-   followed by the ASCII character 'n' in the customized template,
-   the sequence of backslash and 'n' is treated as line-feed unexpectedly
-   (and the multibyte character is broken, too).
-
-The customized template is used for the commands below (``--edit`` may be
-required):
-
-- :hg:`backout`
-- :hg:`commit`
-- :hg:`fetch` (for merge commit only)
-- :hg:`graft`
-- :hg:`histedit`
-- :hg:`import`
-- :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
-- :hg:`rebase`
-- :hg:`shelve`
-- :hg:`sign`
-- :hg:`tag`
-- :hg:`transplant`
-
-Configuring the items below instead of ``changeset`` allows showing a
-customized message only for specific actions, or showing different
-messages for each action.
-
-- ``changeset.backout`` for :hg:`backout`
-- ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
-- ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
-- ``changeset.commit.normal.merge`` for :hg:`commit` on merges
-- ``changeset.commit.normal.normal`` for :hg:`commit` on other
-- ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
-- ``changeset.gpg.sign`` for :hg:`sign`
-- ``changeset.graft`` for :hg:`graft`
-- ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
-- ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
-- ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
-- ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
-- ``changeset.import.bypass`` for :hg:`import --bypass`
-- ``changeset.import.normal.merge`` for :hg:`import` on merges
-- ``changeset.import.normal.normal`` for :hg:`import` on other
-- ``changeset.mq.qnew`` for :hg:`qnew`
-- ``changeset.mq.qfold`` for :hg:`qfold`
-- ``changeset.mq.qrefresh`` for :hg:`qrefresh`
-- ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
-- ``changeset.rebase.merge`` for :hg:`rebase` on merges
-- ``changeset.rebase.normal`` for :hg:`rebase` on other
-- ``changeset.shelve.shelve`` for :hg:`shelve`
-- ``changeset.tag.add`` for :hg:`tag` without ``--remove``
-- ``changeset.tag.remove`` for :hg:`tag --remove`
-- ``changeset.transplant.merge`` for :hg:`transplant` on merges
-- ``changeset.transplant.normal`` for :hg:`transplant` on other
-
-These dot-separated lists of names are treated as hierarchical ones.
-For example, ``changeset.tag.remove`` customizes the commit message
-only for :hg:`tag --remove`, but ``changeset.tag`` customizes the
-commit message for :hg:`tag` regardless of ``--remove`` option.
-
-When the external editor is invoked for a commit, the corresponding
-dot-separated list of names without the ``changeset.`` prefix
-(e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
-variable.
-
-In this section, items other than ``changeset`` can be referenced from
-other items. For example, the configuration below, which lists committed
-files, can be referenced as ``{listupfiles}``::
-
-    [committemplate]
-    listupfiles = {file_adds %
-       "HG: added {file}\n"     }{file_mods %
-       "HG: changed {file}\n"   }{file_dels %
-       "HG: removed {file}\n"   }{if(files, "",
-       "HG: no files changed\n")}
-
-``decode/encode``
------------------
-
-Filters for transforming files on checkout/checkin. This would
-typically be used for newline processing or other
-localization/canonicalization of files.
-
-Filters consist of a filter pattern followed by a filter command.
-Filter patterns are globs by default, rooted at the repository root.
-For example, to match any file ending in ``.txt`` in the root
-directory only, use the pattern ``*.txt``. To match any file ending
-in ``.c`` anywhere in the repository, use the pattern ``**.c``.
-For each file only the first matching filter applies.
-
-The filter command can start with a specifier, either ``pipe:`` or
-``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
-
-A ``pipe:`` command must accept data on stdin and return the transformed
-data on stdout.
-
-Pipe example::
-
-  [encode]
-  # uncompress gzip files on checkin to improve delta compression
-  # note: not necessarily a good idea, just an example
-  *.gz = pipe: gunzip
-
-  [decode]
-  # recompress gzip files when writing them to the working dir (we
-  # can safely omit "pipe:", because it's the default)
-  *.gz = gzip
-
-A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
-with the name of a temporary file that contains the data to be
-filtered by the command. The string ``OUTFILE`` is replaced with the name
-of an empty temporary file, where the filtered data must be written by
-the command.
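-
-A hypothetical tempfile example, assuming a converter program that cannot
-read from stdin (the program name is illustrative only)::
-
-  [decode]
-  **.dat = tempfile: convert-data --input INFILE --output OUTFILE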
-
-.. container:: windows
-
-   .. note::
-
-     The tempfile mechanism is recommended for Windows systems,
-     where the standard shell I/O redirection operators often have
-     strange effects and may corrupt the contents of your files.
-
-This filter mechanism is used internally by the ``eol`` extension to
-translate line ending characters between Windows (CRLF) and Unix (LF)
-format. We suggest you use the ``eol`` extension for convenience.
-
-
-``defaults``
-------------
-
-(defaults are deprecated. Don't use them. Use aliases instead.)
-
-Use the ``[defaults]`` section to define command defaults, i.e. the
-default options/arguments to pass to the specified commands.
-
-The following example makes :hg:`log` run in verbose mode, and
-:hg:`status` show only the modified files, by default::
-
-  [defaults]
-  log = -v
-  status = -m
-
-The actual commands, instead of their aliases, must be used when
-defining command defaults. The command defaults will also be applied
-to the aliases of the commands defined.
-
-
-``diff``
---------
-
-Settings used when displaying diffs. Everything except for ``unified``
-is a Boolean and defaults to False. See :hg:`help config.annotate`
-for related options for the annotate command.
-
-``git``
-    Use git extended diff format.
-
-``nobinary``
-    Omit git binary patches.
-
-``nodates``
-    Don't include dates in diff headers.
-
-``noprefix``
-    Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
-
-``showfunc``
-    Show which function each change is in.
-
-``ignorews``
-    Ignore white space when comparing lines.
-
-``ignorewsamount``
-    Ignore changes in the amount of white space.
-
-``ignoreblanklines``
-    Ignore changes whose lines are all blank.
-
-``unified``
-    Number of lines of context to show.
-
-``word-diff``
-    Highlight changed words.
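-
-For example, to enable the git diff format, show function names, and
-widen the context::
-
-  [diff]
-  git = True
-  showfunc = True
-  unified = 8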
-
-``email``
----------
-
-Settings for extensions that send email messages.
-
-``from``
-    Optional. Email address to use in "From" header and SMTP envelope
-    of outgoing messages.
-
-``to``
-    Optional. Comma-separated list of recipients' email addresses.
-
-``cc``
-    Optional. Comma-separated list of carbon copy recipients'
-    email addresses.
-
-``bcc``
-    Optional. Comma-separated list of blind carbon copy recipients'
-    email addresses.
-
-``method``
-    Optional. Method to use to send email messages. If value is ``smtp``
-    (default), use SMTP (see the ``[smtp]`` section for configuration).
-    Otherwise, use as name of program to run that acts like sendmail
-    (takes ``-f`` option for sender, list of recipients on command line,
-    message on stdin). Normally, setting this to ``sendmail`` or
-    ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
-
-``charsets``
-    Optional. Comma-separated list of character sets considered
-    convenient for recipients. Addresses, headers, and parts not
-    containing patches of outgoing messages will be encoded in the
-    first character set to which conversion from local encoding
-    (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
-    conversion fails, the text in question is sent as is.
-    (default: '')
-
-    Order of outgoing email character sets:
-
-    1. ``us-ascii``: always first, regardless of settings
-    2. ``email.charsets``: in order given by user
-    3. ``ui.fallbackencoding``: if not in email.charsets
-    4. ``$HGENCODING``: if not in email.charsets
-    5. ``utf-8``: always last, regardless of settings
-
-Email example::
-
-  [email]
-  from = Joseph User <joe.user@example.com>
-  method = /usr/sbin/sendmail
-  # charsets for western Europeans
-  # us-ascii, utf-8 omitted, as they are tried first and last
-  charsets = iso-8859-1, iso-8859-15, windows-1252
-
-
-``extensions``
---------------
-
-Mercurial has an extension mechanism for adding new features. To
-enable an extension, create an entry for it in this section.
-
-If you know that the extension is already in Python's search path,
-you can give the name of the module, followed by ``=``, with nothing
-after the ``=``.
-
-Otherwise, give a name that you choose, followed by ``=``, followed by
-the path to the ``.py`` file (including the file name extension) that
-defines the extension.
-
-To explicitly disable an extension that is enabled in an hgrc of
-broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
-or ``foo = !`` when no path is supplied.
-
-Example for ``~/.hgrc``::
-
-  [extensions]
-  # (the churn extension will get loaded from Mercurial's path)
-  churn =
-  # (this extension will get loaded from the file specified)
-  myfeature = ~/.hgext/myfeature.py
-
-
-``format``
-----------
-
-Configuration that controls the repository format. Newer format options are more
-powerful but incompatible with some older versions of Mercurial. Format options
-are considered at repository initialization only. You need to make a new clone
-for a config change to take effect.
-
-For more details about repository format and version compatibility, see
-https://www.mercurial-scm.org/wiki/MissingRequirement
-
-``usegeneraldelta``
-    Enable or disable the "generaldelta" repository format which improves
-    repository compression by allowing "revlog" to store delta against arbitrary
-    revision instead of the previous stored one. This provides significant
-    improvement for repositories with branches.
-
-    Repositories with this on-disk format require Mercurial version 1.9.
-
-    Enabled by default.
-
-``dotencode``
-    Enable or disable the "dotencode" repository format which enhances
-    the "fncache" repository format (which has to be enabled to use
-    dotencode) to avoid issues with filenames starting with ._ on
-    Mac OS X and spaces on Windows.
-
-    Repositories with this on-disk format require Mercurial version 1.7.
-
-    Enabled by default.
-
-``usefncache``
-    Enable or disable the "fncache" repository format which enhances
-    the "store" repository format (which has to be enabled to use
-    fncache) to allow longer filenames and avoids using Windows
-    reserved names, e.g. "nul".
-
-    Repositories with this on-disk format require Mercurial version 1.1.
-
-    Enabled by default.
-
-``usestore``
-    Enable or disable the "store" repository format which improves
-    compatibility with systems that fold case or otherwise mangle
-    filenames. Disabling this option will allow you to store longer filenames
-    in some situations at the expense of compatibility.
-
-    Repositories with this on-disk format require Mercurial version 0.9.4.
-
-    Enabled by default.
-
-``sparse-revlog``
-    Enable or disable the ``sparse-revlog`` delta strategy. This format improves
-    delta re-use inside revlog. For very branchy repositories, it results in a
-    smaller store. For repositories with many revisions, it also helps
-    performance (by using shortened delta chains.)
-
-    Repositories with this on-disk format require Mercurial version 4.7
-
-    Enabled by default.
-
-``revlog-compression``
-    Compression algorithm used by revlog. Supported values are `zlib` and
-    `zstd`. The `zlib` engine is the historical default of Mercurial. `zstd`
-    is a newer format that is usually a net win over `zlib`, operating faster
-    at a better compression rate. Use `zstd` to reduce CPU usage.
-
-    On some systems, the Mercurial installation may lack `zstd` support.
-    (default: `zlib`)
-
-``bookmarks-in-store``
-    Store bookmarks in .hg/store/. This means that bookmarks are shared when
-    using `hg share` regardless of the `-B` option.
-
-    Repositories with this on-disk format require Mercurial version 5.1.
-
-    Disabled by default.
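-
-For example, a sketch that opts newly-created repositories into ``zstd``
-compression and shared bookmarks (remember that format options only take
-effect when a repository is created)::
-
-  [format]
-  revlog-compression = zstd
-  bookmarks-in-store = yes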
-
-
-``graph``
----------
-
-Web graph view configuration. This section lets you change the display
-properties of graph elements per branch, for instance to make the
-``default`` branch stand out.
-
-Each line has the following format::
-
-    <branch>.<argument> = <value>
-
-where ``<branch>`` is the name of the branch being
-customized. Example::
-
-    [graph]
-    # 2px width
-    default.width = 2
-    # red color
-    default.color = FF0000
-
-Supported arguments:
-
-``width``
-    Set branch edges width in pixels.
-
-``color``
-    Set branch edges color in hexadecimal RGB notation.
-
-``hooks``
----------
-
-Commands or Python functions that get automatically executed by
-various actions such as starting or finishing a commit. Multiple
-hooks can be run for the same action by appending a suffix to the
-action. Overriding a site-wide hook can be done by changing its
-value or setting it to an empty string.  Hooks can be prioritized
-by adding a prefix of ``priority.`` to the hook name on a new line
-and setting the priority. The default priority is 0.
-
-Example ``.hg/hgrc``::
-
-  [hooks]
-  # update working directory after adding changesets
-  changegroup.update = hg update
-  # do not use the site-wide hook
-  incoming =
-  incoming.email = /my/email/hook
-  incoming.autobuild = /my/build/hook
-  # force autobuild hook to run before other incoming hooks
-  priority.incoming.autobuild = 1
-
-Most hooks are run with environment variables set that give useful
-additional information. For each hook below, the environment variables
-it is passed are listed with names in the form ``$HG_foo``. The
-``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
-They contain the type of hook which triggered the run and the full name
-of the hook in the config, respectively. In the example above, this will
-be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
-
-.. container:: windows
-
-  Some basic Unix syntax can be enabled for portability, including ``$VAR``
-  and ``${VAR}`` style variables.  A ``~`` followed by ``\`` or ``/`` will
-  be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
-  on Unix.  To use a literal ``$`` or ``~``, it must be escaped with a back
-  slash or inside of a strong quote.  Strong quotes will be replaced by
-  double quotes after processing.
-
-  This feature is enabled by adding a prefix of ``tonative.`` to the hook
-  name on a new line, and setting it to ``True``.  For example::
-
-    [hooks]
-    incoming.autobuild = /my/build/hook
-    # enable translation to cmd.exe syntax for autobuild hook
-    tonative.incoming.autobuild = True
-
-``changegroup``
-  Run after a changegroup has been added via push, pull or unbundle.  The ID of
-  the first new changeset is in ``$HG_NODE`` and the last is in ``$HG_NODE_LAST``.
-  The URL from which changes came is in ``$HG_URL``.
-
-``commit``
-  Run after a changeset has been created in the local repository. The ID
-  of the newly created changeset is in ``$HG_NODE``. Parent changeset
-  IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
-
-``incoming``
-  Run after a changeset has been pulled, pushed, or unbundled into
-  the local repository. The ID of the newly arrived changeset is in
-  ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
-
-``outgoing``
-  Run after sending changes from the local repository to another. The ID of
-  the first changeset sent is in ``$HG_NODE``. The source of the operation is
-  in ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
-
-``post-<command>``
-  Run after successful invocations of the associated command. The
-  contents of the command line are passed as ``$HG_ARGS`` and the result
-  code in ``$HG_RESULT``. Parsed command line arguments are passed as
-  ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
-  the python data internally passed to <command>. ``$HG_OPTS`` is a
-  dictionary of options (with unspecified options set to their defaults).
-  ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
-
-``fail-<command>``
-  Run after a failed invocation of an associated command. The contents
-  of the command line are passed as ``$HG_ARGS``. Parsed command line
-  arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
-  string representations of the python data internally passed to
-  <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
-  options set to their defaults). ``$HG_PATS`` is a list of arguments.
-  Hook failure is ignored.
-
-``pre-<command>``
-  Run before executing the associated command. The contents of the
-  command line are passed as ``$HG_ARGS``. Parsed command line arguments
-  are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
-  representations of the data internally passed to <command>. ``$HG_OPTS``
-  is a dictionary of options (with unspecified options set to their
-  defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
-  failure, the command doesn't execute and Mercurial returns the failure
-  code.
-
-``prechangegroup``
-  Run before a changegroup is added via push, pull or unbundle. Exit
-  status 0 allows the changegroup to proceed. A non-zero status will
-  cause the push, pull or unbundle to fail. The URL from which changes
-  will come is in ``$HG_URL``.
-
-``precommit``
-  Run before starting a local commit. Exit status 0 allows the
-  commit to proceed. A non-zero status will cause the commit to fail.
-  Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
-
-``prelistkeys``
-  Run before listing pushkeys (like bookmarks) in the
-  repository. A non-zero status will cause failure. The key namespace is
-  in ``$HG_NAMESPACE``.
-
-``preoutgoing``
-  Run before collecting changes to send from the local repository to
-  another. A non-zero status will cause failure. This lets you prevent
-  pull over HTTP or SSH. It can also prevent propagating commits (via
-  local pull, push (outbound) or bundle commands), but not completely,
-  since you can just copy files instead. The source of operation is in
-  ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
-  SSH or HTTP repository. If "push", "pull" or "bundle", the operation
-  is happening on behalf of a repository on the same system.
-
-``prepushkey``
-  Run before a pushkey (like a bookmark) is added to the
-  repository. A non-zero status will cause the key to be rejected. The
-  key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
-  the old value (if any) is in ``$HG_OLD``, and the new value is in
-  ``$HG_NEW``.
-
-``pretag``
-  Run before creating a tag. Exit status 0 allows the tag to be
-  created. A non-zero status will cause the tag to fail. The ID of the
-  changeset to tag is in ``$HG_NODE``. The name of the tag is in ``$HG_TAG``. The
-  tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
-
-``pretxnopen``
-  Run before any new repository transaction is opened. The reason for the
-  transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
-  transaction will be in ``HG_TXNID``. A non-zero status will prevent the
-  transaction from being opened.
-
-``pretxnclose``
-  Run right before the transaction is actually finalized. Any repository change
-  will be visible to the hook program. This lets you validate the transaction
-  content or change it. Exit status 0 allows the commit to proceed. A non-zero
-  status will cause the transaction to be rolled back. The reason for the
-  transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
-  the transaction will be in ``HG_TXNID``. The rest of the available data will
-  vary according to the transaction type. New changesets will add ``$HG_NODE``
-  (the ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last
-  added changeset), ``$HG_URL`` and ``$HG_SOURCE`` variables.  Bookmark and
-  phase changes will set ``HG_BOOKMARK_MOVED`` and ``HG_PHASES_MOVED`` to ``1``
-  respectively, etc.
-
-``pretxnclose-bookmark``
-  Run right before a bookmark change is actually finalized. Any repository
-  change will be visible to the hook program. This lets you validate the
-  transaction content or change it. Exit status 0 allows the commit to
-  proceed. A non-zero status will cause the transaction to be rolled back.
-  The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
-  bookmark location will be available in ``$HG_NODE`` while the previous
-  location will be available in ``$HG_OLDNODE``. In case of a bookmark
-  creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
-  will be empty.
-  In addition, the reason for the transaction opening will be in
-  ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
-  ``HG_TXNID``.
-
-``pretxnclose-phase``
-  Run right before a phase change is actually finalized. Any repository change
-  will be visible to the hook program. This lets you validate the transaction
-  content or change it. Exit status 0 allows the commit to proceed.  A non-zero
-  status will cause the transaction to be rolled back. The hook is called
-  multiple times, once for each revision affected by a phase change.
-  The affected node is available in ``$HG_NODE``, the new phase in
-  ``$HG_PHASE``, and the previous phase in ``$HG_OLDPHASE``. In case of a
-  new node, ``$HG_OLDPHASE``
-  will be empty.  In addition, the reason for the transaction opening will be in
-  ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
-  ``HG_TXNID``. The hook is also run for newly added revisions. In this case
-  the ``$HG_OLDPHASE`` entry will be empty.
-
-``txnclose``
-  Run after any repository transaction has been committed. At this
-  point, the transaction can no longer be rolled back. The hook will run
-  after the lock is released. See :hg:`help config.hooks.pretxnclose` for
-  details about available variables.
-
-``txnclose-bookmark``
-  Run after any bookmark change has been committed. At this point, the
-  transaction can no longer be rolled back. The hook will run after the lock
-  is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
-  about available variables.
-
-``txnclose-phase``
-  Run after any phase change has been committed. At this point, the
-  transaction can no longer be rolled back. The hook will run after the lock
-  is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
-  available variables.
-
-``txnabort``
-  Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
-  for details about available variables.
-
-``pretxnchangegroup``
-  Run after a changegroup has been added via push, pull or unbundle, but before
-  the transaction has been committed. The changegroup is visible to the hook
-  program. This allows validation of incoming changes before accepting them.
-  The ID of the first new changeset is in ``$HG_NODE`` and the last is in
-  ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
-  status will cause the transaction to be rolled back, and the push, pull or
-  unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
-
-``pretxncommit``
-  Run after a changeset has been created, but before the transaction is
-  committed. The changeset is visible to the hook program. This allows
-  validation of the commit message and changes. Exit status 0 allows the
-  commit to proceed. A non-zero status will cause the transaction to
-  be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
-  changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
-
-``preupdate``
-  Run before updating the working directory. Exit status 0 allows
-  the update to proceed. A non-zero status will prevent the update.
-  The changeset ID of the first new parent is in ``$HG_PARENT1``. If updating
-  to a merge, the ID of the second new parent is in ``$HG_PARENT2``.
-
-``listkeys``
-  Run after listing pushkeys (like bookmarks) in the repository. The
-  key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
-  dictionary containing the keys and values.
-
-``pushkey``
-  Run after a pushkey (like a bookmark) is added to the
-  repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
-  ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
-  value is in ``$HG_NEW``.
-
-``tag``
-  Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
-  The name of the tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
-  the repository if ``$HG_LOCAL=0``.
-
-``update``
-  Run after updating the working directory. The changeset ID of the first new
-  parent is in ``$HG_PARENT1``. If updating to a merge, the ID of the second
-  new parent is in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
-  update failed (e.g. because conflicts were not resolved), ``$HG_ERROR=1``.
-
-.. note::
-
-   It is generally better to use standard hooks rather than the
-   generic pre- and post- command hooks, as they are guaranteed to be
-   called in the appropriate contexts for influencing transactions.
-   Also, hooks like "commit" will be called in all contexts that
-   generate a commit (e.g. tag) and not just the commit command.
-
-.. note::
-
-   Environment variables with empty values may not be passed to
-   hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
-   will have an empty value under Unix-like platforms for non-merge
-   changesets, while it will not be available at all under Windows.
-
-The syntax for Python hooks is as follows::
-
-  hookname = python:modulename.submodule.callable
-  hookname = python:/path/to/python/module.py:callable
-
-Python hooks are run within the Mercurial process. Each hook is
-called with at least three keyword arguments: a ui object (keyword
-``ui``), a repository object (keyword ``repo``), and a ``hooktype``
-keyword that tells what kind of hook is used. Arguments listed as
-environment variables above are passed as keyword arguments, with no
-``HG_`` prefix, and names in lower case.
-
-If a Python hook returns a "true" value or raises an exception, this
-is treated as a failure.
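-
-As a minimal sketch, assuming a file ``/path/to/myhooks.py`` (the path, the
-function, and the policy are hypothetical), a ``pretxncommit`` Python hook
-that rejects commits whose message contains "WIP" could look like::
-
-  # myhooks.py
-  def nowip(ui, repo, hooktype, node=None, **kwargs):
-      """Fail the transaction if the commit message contains WIP."""
-      if b'WIP' in repo[node].description():
-          ui.warn(b"commit message contains WIP; rejecting\n")
-          return True   # a "true" return value is treated as failure
-      return False      # a "false" return value lets the commit proceed
-
-and be enabled with::
-
-  [hooks]
-  pretxncommit.nowip = python:/path/to/myhooks.py:nowip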
-
-
-``hostfingerprints``
---------------------
-
-(Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
-
-Fingerprints of the certificates of known HTTPS servers.
-
-An HTTPS connection to a server with a fingerprint configured here will
-only succeed if the server's certificate matches the fingerprint.
-This is very similar to how ssh known hosts work.
-
-The fingerprint is the SHA-1 hash value of the DER encoded certificate.
-Multiple values can be specified (separated by spaces or commas). This can
-be used to define both old and new fingerprints while a host transitions
-to a new certificate.
-
-The CA chain and web.cacerts are not used for servers with a fingerprint.
-
-For example::
-
-    [hostfingerprints]
-    hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
-    hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
-
-``hostsecurity``
-----------------
-
-Used to specify global and per-host security settings for connecting to
-other machines.
-
-The following options control default behavior for all hosts.
-
-``ciphers``
-    Defines the cryptographic ciphers to use for connections.
-
-    Value must be a valid OpenSSL Cipher List Format as documented at
-    https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
-
-    This setting is for advanced users only. Setting to incorrect values
-    can significantly lower connection security or decrease performance.
-    You have been warned.
-
-    This option requires Python 2.7.
-
-``minimumprotocol``
-    Defines the minimum channel encryption protocol to use.
-
-    By default, the highest version of TLS supported by both client and server
-    is used.
-
-    Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
-
-    When running on an old Python version, only ``tls1.0`` is allowed since
-    old versions of Python only support up to TLS 1.0.
-
-    When running a Python that supports modern TLS versions, the default is
-    ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
-    weakens security and should only be used as a feature of last resort if
-    a server does not support TLS 1.1+.
-
-Options in the ``[hostsecurity]`` section can have the form
-``hostname``:``setting``. This allows multiple settings to be defined on a
-per-host basis.
-
-The following per-host settings can be defined.
-
-``ciphers``
-    This behaves like ``ciphers`` as described above except it only applies
-    to the host on which it is defined.
-
-``fingerprints``
-    A list of hashes of the DER encoded peer/remote certificate. Values have
-    the form ``algorithm``:``fingerprint``. e.g.
-    ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
-    In addition, colons (``:``) can appear in the fingerprint part.
-
-    The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
-    ``sha512``.
-
-    Use of ``sha256`` or ``sha512`` is preferred.
-
-    If a fingerprint is specified, the CA chain is not validated for this
-    host and Mercurial will require the remote certificate to match one
-    of the fingerprints specified. This means if the server updates its
-    certificate, Mercurial will abort until a new fingerprint is defined.
-    This can provide stronger security than traditional CA-based validation
-    at the expense of convenience.
-
-    This option takes precedence over ``verifycertsfile``.
-
-``minimumprotocol``
-    This behaves like ``minimumprotocol`` as described above except it
-    only applies to the host on which it is defined.
-
-``verifycertsfile``
-    Path to a file containing a list of PEM encoded certificates used to
-    verify the server certificate. Environment variables and ``~user``
-    constructs are expanded in the filename.
-
-    The server certificate or the certificate's certificate authority (CA)
-    must match a certificate from this file or certificate verification
-    will fail and connections to the server will be refused.
-
-    If defined, only certificates provided by this file will be used:
-    ``web.cacerts`` and any system/default certificates will not be
-    used.
-
-    This option has no effect if the per-host ``fingerprints`` option
-    is set.
-
-    The format of the file is as follows::
-
-        -----BEGIN CERTIFICATE-----
-        ... (certificate in base64 PEM encoding) ...
-        -----END CERTIFICATE-----
-        -----BEGIN CERTIFICATE-----
-        ... (certificate in base64 PEM encoding) ...
-        -----END CERTIFICATE-----
-
-For example::
-
-    [hostsecurity]
-    hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
-    hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
-    hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
-    foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
-
-To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
-when connecting to ``hg.example.com``::
-
-    [hostsecurity]
-    minimumprotocol = tls1.2
-    hg.example.com:minimumprotocol = tls1.1
-
-``http_proxy``
---------------
-
-Used to access web-based Mercurial repositories through an HTTP
-proxy.
-
-``host``
-    Host name and (optional) port of the proxy server, for example
-    "myproxy:8000".
-
-``no``
-    Optional. Comma-separated list of host names that should bypass
-    the proxy.
-
-``passwd``
-    Optional. Password to authenticate with at the proxy server.
-
-``user``
-    Optional. User name to authenticate with at the proxy server.
-
-``always``
-    Optional. Always use the proxy, even for localhost and any entries
-    in ``http_proxy.no``. (default: False)
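-
-Example configuration, using hypothetical host names::
-
-    [http_proxy]
-    host = myproxy:8000
-    no = localhost, intranet.example.com
-    user = proxyuser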
-
-``http``
-----------
-
-Used to configure access to Mercurial repositories via HTTP.
-
-``timeout``
-    If set, blocking operations will timeout after that many seconds.
-    (default: None)
-
-``merge``
----------
-
-This section specifies behavior during merges and updates.
-
-``checkignored``
-   Controls behavior when an ignored file on disk has the same name as a tracked
-   file in the changeset being merged or updated to, and has different
-   contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
-   abort on such files. With ``warn``, warn on such files and back them up as
-   ``.orig``. With ``ignore``, don't print a warning and back them up as
-   ``.orig``. (default: ``abort``)
-
-``checkunknown``
-   Controls behavior when an unknown file that isn't ignored has the same name
-   as a tracked file in the changeset being merged or updated to, and has
-   different contents. Similar to ``merge.checkignored``, except for files that
-   are not ignored. (default: ``abort``)
-
-``on-failure``
-   When set to ``continue`` (the default), the merge process attempts to
-   merge all unresolved files using the chosen merge tool, regardless of
-   whether previous file merge attempts during the process succeeded or not.
-   Setting this to ``prompt`` will prompt after any merge failure whether to
-   continue or halt the merge process. Setting this to ``halt`` will
-   automatically halt the merge process on any merge tool failure. The merge
-   process can be restarted by using the ``resolve`` command. When a merge is
-   halted, the repository is left in a normal ``unresolved`` merge state.
-   (default: ``continue``)
-
-``strict-capability-check``
-   Whether capabilities of internal merge tools are checked strictly
-   or not, while examining rules to decide which merge tool to use.
-   (default: False)
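-
-For example, a sketch that warns about (and backs up) conflicting unknown
-files instead of aborting, and prompts after each failed file merge::
-
-  [merge]
-  checkunknown = warn
-  on-failure = prompt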
-
-``merge-patterns``
-------------------
-
-This section specifies merge tools to associate with particular file
-patterns. Tools matched here will take precedence over the default
-merge tool. Patterns are globs by default, rooted at the repository
-root.
-
-Example::
-
-  [merge-patterns]
-  **.c = kdiff3
-  **.jpg = myimgmerge
-
-``merge-tools``
----------------
-
-This section configures external merge tools to use for file-level
-merges. This section has likely been preconfigured at install time.
-Use :hg:`config merge-tools` to check the existing configuration.
-Also see :hg:`help merge-tools` for more details.
-
-Example ``~/.hgrc``::
-
-  [merge-tools]
-  # Override stock tool location
-  kdiff3.executable = ~/bin/kdiff3
-  # Specify command line
-  kdiff3.args = $base $local $other -o $output
-  # Give higher priority
-  kdiff3.priority = 1
-
-  # Changing the priority of preconfigured tool
-  meld.priority = 0
-
-  # Disable a preconfigured tool
-  vimdiff.disabled = yes
-
-  # Define new tool
-  myHtmlTool.args = -m $local $other $base $output
-  myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
-  myHtmlTool.priority = 1
-
-Supported arguments:
-
-``priority``
-  The priority in which to evaluate this tool.
-  (default: 0)
-
-``executable``
-  Either just the name of the executable or its pathname.
-
-  .. container:: windows
-
-    On Windows, the path can use environment variables with ${ProgramFiles}
-    syntax.
-
-  (default: the tool name)
-
-``args``
-  The arguments to pass to the tool executable. You can refer to the
-  files being merged as well as the output file through these
-  variables: ``$base``, ``$local``, ``$other``, ``$output``.
-
-  The meaning of ``$local`` and ``$other`` can vary depending on which action is
-  being performed. During an update or merge, ``$local`` represents the original
-  state of the file, while ``$other`` represents the commit you are updating to or
-  the commit you are merging with. During a rebase, ``$local`` represents the
-  destination of the rebase, and ``$other`` represents the commit being rebased.
-
-  Some operations define custom labels to assist with identifying the revisions,
-  accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
-  labels are not available, these will be ``local``, ``other``, and ``base``,
-  respectively.
-  (default: ``$local $base $other``)
-
-``premerge``
-  Attempt to run the internal non-interactive 3-way merge tool before
-  launching the external tool.  Options are ``true``, ``false``, ``keep`` or
-  ``keep-merge3``. The ``keep`` option will leave markers in the file if the
-  premerge fails. The ``keep-merge3`` option will do the same but include
-  information about the base of the merge in the markers (see
-  ``internal:merge3`` in :hg:`help merge-tools`).
-  (default: True)
-
-``binary``
-  This tool can merge binary files. (default: False, unless tool
-  was selected by file pattern match)
-
-``symlink``
-  This tool can merge symlinks. (default: False)
-
-``check``
-  A list of merge success-checking options:
-
-  ``changed``
-    Ask whether merge was successful when the merged file shows no changes.
-  ``conflicts``
-    Check whether there are conflicts even though the tool reported success.
-  ``prompt``
-    Always prompt for merge success, regardless of success reported by tool.
-
-``fixeol``
-  Attempt to fix up EOL changes caused by the merge tool.
-  (default: False)
-
-``gui``
-  This tool requires a graphical interface to run. (default: False)
-
-``mergemarkers``
-  Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
-  ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
-  ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
-  markers generated during premerge will be ``detailed`` if either this option or
-  the corresponding option in the ``[ui]`` section is ``detailed``.
-  (default: ``basic``)
-
-``mergemarkertemplate``
-  This setting can be used to override ``mergemarkertemplate`` from the ``[ui]``
-  section on a per-tool basis; this applies to the ``$label``-prefixed variables
-  and to the conflict markers that are generated if ``premerge`` is ``keep`` or
-  ``keep-merge3``. See the corresponding variable in ``[ui]`` for more
-  information.
-
-.. container:: windows
-
-  ``regkey``
-    Windows registry key which describes install location of this
-    tool. Mercurial will search for this key first under
-    ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
-    (default: None)
-
-  ``regkeyalt``
-    An alternate Windows registry key to try if the first key is not
-    found.  The alternate key uses the same ``regname`` and ``regappend``
-    semantics of the primary key.  The most common use for this key
-    is to search for 32bit applications on 64bit operating systems.
-    (default: None)
-
-  ``regname``
-    Name of value to read from specified registry key.
-    (default: the unnamed (default) value)
-
-  ``regappend``
-    String to append to the value read from the registry, typically
-    the executable name of the tool.
-    (default: None)
-
-``pager``
----------
-
-Setting used to control when to paginate and with what external tool. See
-:hg:`help pager` for details.
-
-``pager``
-    Define the external tool used as pager.
-
-    If no pager is set, Mercurial uses the environment variable $PAGER.
-    If neither pager.pager nor $PAGER is set, a default pager will be
-    used, typically `less` on Unix and `more` on Windows. Example::
-
-      [pager]
-      pager = less -FRX
-
-``ignore``
-    List of commands to disable the pager for. Example::
-
-      [pager]
-      ignore = version, help, update
-
-``patch``
----------
-
-Settings used when applying patches, for instance through the 'import'
-command or with the Mercurial Queues extension.
-
-``eol``
-    When set to ``strict``, the line endings of both the patch content and
-    the patched files are preserved. When set to ``lf`` or ``crlf``, line
-    endings are ignored when patching and the resulting line endings are
-    normalized to either LF (Unix) or CRLF (Windows). When set to
-    ``auto``, line endings are again ignored while patching, but line
-    endings in patched files are normalized to their original setting
-    on a per-file basis. If the target file does not exist or has no line
-    endings, the patch's line endings are preserved.
-    (default: strict)
-
-``fuzz``
-    The number of lines of 'fuzz' to allow when applying patches. This
-    controls how much context the patcher is allowed to ignore when
-    trying to apply a patch.
-    (default: 2)
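-
-For example, a sketch that normalizes patched line endings per file and
-allows a little more fuzz::
-
-    [patch]
-    eol = auto
-    fuzz = 4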
-
-``paths``
----------
-
-Assigns symbolic names and behavior to repositories.
-
-Options are symbolic names defining the URL or directory that is the
-location of the repository. Example::
-
-    [paths]
-    my_server = https://example.com/my_repo
-    local_path = /home/me/repo
-
-These symbolic names can be used from the command line. To pull
-from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
-:hg:`push local_path`.
-
-Options containing colons (``:``) denote sub-options that can influence
-behavior for that specific path. Example::
-
-    [paths]
-    my_server = https://example.com/my_path
-    my_server:pushurl = ssh://example.com/my_path
-
-The following sub-options can be defined:
-
-``pushurl``
-   The URL to use for push operations. If not defined, the location
-   defined by the path's main entry is used.
-
-``pushrev``
-   A revset defining which revisions to push by default.
-
-   When :hg:`push` is executed without a ``-r`` argument, the revset
-   defined by this sub-option is evaluated to determine what to push.
-
-   For example, a value of ``.`` will push the working directory's
-   revision by default.
-
-   Revsets specifying bookmarks will not result in the bookmark being
-   pushed.
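-
-   For example, a sketch combining the sub-options above::
-
-     [paths]
-     my_server = https://example.com/my_path
-     my_server:pushurl = ssh://example.com/my_path
-     my_server:pushrev = .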
-
-The following special named paths exist:
-
-``default``
-   The URL or directory to use when no source or remote is specified.
-
-   :hg:`clone` will automatically define this path to the location the
-   repository was cloned from.
-
-``default-push``
-   (deprecated) The URL or directory for the default :hg:`push` location.
-   ``default:pushurl`` should be used instead.
-
-``phases``
-----------
-
-Specifies default handling of phases. See :hg:`help phases` for more
-information about working with phases.
-
-``publish``
-    Controls draft phase behavior when working as a server. When true,
-    pushed changesets are set to public in both client and server and
-    pulled or cloned changesets are set to public in the client.
-    (default: True)
-
-``new-commit``
-    Phase of newly-created commits.
-    (default: draft)
-
-``checksubrepos``
-    Check the phase of the current revision of each subrepository. Allowed
-    values are "ignore", "follow" and "abort". For settings other than
-    "ignore", the phase of the current revision of each subrepository is
-    checked before committing the parent repository. If any of those phases is
-    greater than the phase of the parent repository (e.g. if a subrepo is in a
-    "secret" phase while the parent repo is in "draft" phase), the commit is
-    either aborted (if checksubrepos is set to "abort") or the higher phase is
-    used for the parent repository commit (if set to "follow").
-    (default: follow)
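-
-For example, a sketch of a non-publishing setup where new commits start
-out secret::
-
-    [phases]
-    publish = False
-    new-commit = secret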
-
-
-``profiling``
--------------
-
-Specifies profiling type, format, and file output. Two profilers are
-supported: an instrumenting profiler (named ``ls``), and a sampling
-profiler (named ``stat``).
-
-In this section description, 'profiling data' stands for the raw data
-collected during profiling, while 'profiling report' stands for a
-statistical text report generated from the profiling data.
-
-``enabled``
-    Enable the profiler.
-    (default: false)
-
-    This is equivalent to passing ``--profile`` on the command line.
-
-``type``
-    The type of profiler to use.
-    (default: stat)
-
-    ``ls``
-      Use Python's built-in instrumenting profiler. This profiler
-      works on all platforms, but each line number it reports is the
-      first line of a function. This restriction makes it difficult to
-      identify the expensive parts of a non-trivial function.
-    ``stat``
-      Use a statistical profiler, statprof. This profiler is most
-      useful for profiling commands that run for longer than about 0.1
-      seconds.
-
-``format``
-    Profiling format.  Specific to the ``ls`` instrumenting profiler.
-    (default: text)
-
-    ``text``
-      Generate a profiling report. When saving to a file, it should be
-      noted that only the report is saved, and the profiling data is
-      not kept.
-    ``kcachegrind``
-      Format profiling data for kcachegrind use: when saving to a
-      file, the generated file can directly be loaded into
-      kcachegrind.
-
-``statformat``
-    Profiling format for the ``stat`` profiler.
-    (default: hotpath)
-
-    ``hotpath``
-      Show a tree-based display containing the hot path of execution (where
-      most time was spent).
-    ``bymethod``
-      Show a table of methods ordered by how frequently they are active.
-    ``byline``
-      Show a table of lines in files ordered by how frequently they are active.
-    ``json``
-      Render profiling data as JSON.
-
-``frequency``
-    Sampling frequency.  Specific to the ``stat`` sampling profiler.
-    (default: 1000)
-
-``output``
-    File path where profiling data or report should be saved. If the
-    file exists, it is replaced. (default: None, data is printed on
-    stderr)
-
-``sort``
-    Sort field.  Specific to the ``ls`` instrumenting profiler.
-    One of ``callcount``, ``reccallcount``, ``totaltime`` and
-    ``inlinetime``.
-    (default: inlinetime)
-
-``time-track``
-    Controls whether the stat profiler tracks ``cpu`` or ``real`` time.
-    (default: ``cpu`` on Windows, otherwise ``real``)
-
-``limit``
-    Number of lines to show. Specific to the ``ls`` instrumenting profiler.
-    (default: 30)
-
-``nested``
-    Show at most this number of lines of drill-down info after each main entry.
-    This can help explain the difference between Total and Inline.
-    Specific to the ``ls`` instrumenting profiler.
-    (default: 0)
-
-``showmin``
-    Minimum fraction of samples an entry must have for it to be displayed.
-    Can be specified as a float between ``0.0`` and ``1.0`` or can have a
-    ``%`` afterwards to allow values up to ``100``. e.g. ``5%``.
-
-    Only used by the ``stat`` profiler.
-
-    For the ``hotpath`` format, default is ``0.05``.
-    For the ``chrome`` format, default is ``0.005``.
-
-    The option is unused on other formats.
-
-``showmax``
-    Maximum fraction of samples an entry can have before it is ignored in
-    display. Values format is the same as ``showmin``.
-
-    Only used by the ``stat`` profiler.
-
-    For the ``chrome`` format, default is ``0.999``.
-
-    The option is unused on other formats.
-
-``showtime``
-    Show time taken as absolute durations, in addition to percentages.
-    Only used by the ``hotpath`` format.
-    (default: true)
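-
-For example, a sketch that always profiles with the sampling profiler and
-saves the hotpath report to a file (the output path is illustrative)::
-
-    [profiling]
-    enabled = true
-    type = stat
-    statformat = hotpath
-    output = /tmp/hgprof.txt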
-
-``progress``
-------------
-
-Mercurial commands can draw progress bars that are as informative as
-possible. Some progress bars only offer indeterminate information, while others
-have a definite end point.
-
-``debug``
-    Whether to print debug info when updating the progress bar. (default: False)
-
-``delay``
-    Number of seconds (float) before showing the progress bar. (default: 3)
-
-``changedelay``
-    Minimum delay before showing a new topic. When set to less than 3 * refresh,
-    that value will be used instead. (default: 1)
-
-``estimateinterval``
-    Maximum sampling interval in seconds for speed and estimated time
-    calculation. (default: 60)
-
-``refresh``
-    Time in seconds between refreshes of the progress bar. (default: 0.1)
-
-``format``
-    Format of the progress bar.
-
-    Valid entries for the format field are ``topic``, ``bar``, ``number``,
-    ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
-    last 20 characters of the item, but this can be changed by adding either
-    ``-<num>`` which would take the last num characters, or ``+<num>`` for the
-    first num characters.
-
-    (default: topic bar number estimate)
-
-``width``
-    If set, the maximum width of the progress information (that is, min(width,
-    term width) will be used).
-
-``clear-complete``
-    Clear the progress bar after it's done. (default: True)
-
-``disable``
-    If true, don't show a progress bar.
-
-``assume-tty``
-    If true, ALWAYS show a progress bar, unless disable is given.
-
-``rebase``
-----------
-
-``evolution.allowdivergence``
-    Defaults to False. When True, allows creating divergence when rebasing
-    obsolete changesets.
-
-``revsetalias``
----------------
-
-Alias definitions for revsets. See :hg:`help revsets` for details.
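-
-For example, a hypothetical alias usable as ``hg log -r mine``::
-
-    [revsetalias]
-    mine = author("alice") and not public()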
-
-``rewrite``
------------
-
-``backup-bundle``
-    Whether to save stripped changesets to a bundle file. (default: True)
-
-``update-timestamp``
-    If true, updates the date and time of the changeset to the current date
-    and time. It is only applicable to `hg amend`, `hg commit --amend` and
-    `hg uncommit` in the current version.
-
-``storage``
------------
-
-Control the strategy Mercurial uses internally to store history. Options in this
-category impact performance and repository size.
-
-``revlog.optimize-delta-parent-choice``
-    When storing a merge revision, both parents will be equally considered as
-    a possible delta base. This results in better delta selection and improved
-    revlog compression. This option is enabled by default.
-
-    Turning this option off can result in a large increase of repository size
-    for repositories with many merges.
-
-``revlog.reuse-external-delta-parent``
-    Control the order in which delta parents are considered when adding new
-    revisions from an external source.
-    (typically: apply bundle from `hg pull` or `hg push`).
-
-    New revisions are usually provided as a delta against other revisions. By
-    default, Mercurial will try to reuse this delta first, therefore using the
-    same "delta parent" as the source. Directly using deltas from the source
-    reduces CPU usage and usually speeds up operation. However, in some cases,
-    the source might have sub-optimal delta bases and forcing their
-    reevaluation is useful. For example, pushes from an old client could have
-    sub-optimal delta parents that the server wants to optimize (lack of
-    generaldelta, bad parent choices, lack of sparse-revlog, etc).
-
-    This option is enabled by default. Turning it off will ensure bad delta
-    parent choices from older clients do not propagate to this repository, at
-    the cost of a small increase in CPU consumption.
-
-    Note: this option only controls the order in which delta parents are
-    considered.  Even when disabled, the existing delta from the source will be
-    reused if the same delta parent is selected.
-
-``revlog.reuse-external-delta``
-    Control the reuse of deltas from external sources
-    (typically: applying a bundle from `hg pull` or `hg push`).
-
-    New revisions are usually provided as a delta against another revision. By
-    default, Mercurial will not recompute the same delta again, trusting
-    externally provided deltas. There have been rare cases of small
-    adjustments to the diffing algorithm in the past, so in some rare cases,
-    recomputing deltas provided by ancient clients can give better results.
-    Disabling this option means going through a full delta recomputation for
-    all incoming revisions, which means a large increase in CPU usage and
-    slower operations.
-
-    This option is enabled by default. When disabled, it also disables the
-    related ``storage.revlog.reuse-external-delta-parent`` option.
-
-``revlog.zlib.level``
-    Zlib compression level used when storing data into the repository.
-    Accepted values range from 1 (lowest compression) to 9 (highest
-    compression). Zlib's default value is 6.
-
-
-``revlog.zstd.level``
-    zstd compression level used when storing data into the repository.
-    Accepted values range from 1 (lowest compression) to 22 (highest
-    compression). (default: 3)
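-
-    For example, an illustrative trade of more CPU time at store time
-    for a smaller repository::
-
-      [storage]
-      revlog.zstd.level = 10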
-
-``server``
-----------
-
-Controls generic server settings.
-
-``bookmarks-pushkey-compat``
-    Trigger the pushkey hook when bookmark updates are pushed. This config
-    exists for compatibility purposes. (default: True)
-
-    If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
-    movement we recommend you migrate them to ``txnclose-bookmark`` and
-    ``pretxnclose-bookmark``.
-
-``compressionengines``
-    List of compression engines and their relative priority to advertise
-    to clients.
-
-    The order of compression engines determines their priority, the first
-    having the highest priority. If a compression engine is not listed
-    here, it won't be advertised to clients.
-
-    If not set (the default), built-in defaults are used. Run
-    :hg:`debuginstall` to list available compression engines and their
-    default wire protocol priority.
-
-    Older Mercurial clients only support zlib compression and this setting
-    has no effect for legacy clients.
-
-``uncompressed``
-    Whether to allow clients to clone a repository using the
-    uncompressed streaming protocol. This transfers about 40% more
-    data than a regular clone, but uses less memory and CPU on both
-    server and client. Over a LAN (100 Mbps or better) or a very fast
-    WAN, an uncompressed streaming clone is a lot faster (~10x) than a
-    regular clone. Over most WAN connections (anything slower than
-    about 6 Mbps), uncompressed streaming is slower, because of the
-    extra data transfer overhead. This mode will also temporarily hold
-    the write lock while determining what data to transfer.
-    (default: True)
-
-``uncompressedallowsecret``
-    Whether to allow stream clones when the repository contains secret
-    changesets. (default: False)
-
-``preferuncompressed``
-    When set, clients will try to use the uncompressed streaming
-    protocol. (default: False)
-
-``disablefullbundle``
-    When set, servers will refuse attempts to do pull-based clones.
-    If this option is set, ``preferuncompressed`` and/or clone bundles
-    are highly recommended. Partial clones will still be allowed.
-    (default: False)
-
-``streamunbundle``
-    When set, servers will apply data sent from the client directly,
-    otherwise it will be written to a temporary file first. This option
-    effectively prevents concurrent pushes.
-
-``pullbundle``
-    When set, the server will check pullbundle.manifest for bundles
-    covering the requested heads and common nodes. The first matching
-    entry will be streamed to the client.
-
-    For HTTP transport, the stream will still use zlib compression
-    for older clients.
-
-``concurrent-push-mode``
-    Level of allowed race condition between two pushing clients.
-
-    - 'strict': push is aborted if another client touched the repository
-      while the push was preparing. (default)
-    - 'check-related': push is only aborted if it affects heads that were
-      also affected while the push was preparing.
-
-    This requires a compatible client (version 4.3 and later). Older
-    clients will use 'strict'.
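-
-    An illustrative override::
-
-      [server]
-      concurrent-push-mode = check-related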
-
-``validate``
-    Whether to validate the completeness of pushed changesets by
-    checking that all new file revisions specified in manifests are
-    present. (default: False)
-
-``maxhttpheaderlen``
-    Instruct HTTP clients not to send request headers longer than this
-    many bytes. (default: 1024)
-
-``bundle1``
-    Whether to allow clients to push and pull using the legacy bundle1
-    exchange format. (default: True)
-
-``bundle1gd``
-    Like ``bundle1`` but only used if the repository is using the
-    *generaldelta* storage format. (default: True)
-
-``bundle1.push``
-    Whether to allow clients to push using the legacy bundle1 exchange
-    format. (default: True)
-
-``bundle1gd.push``
-    Like ``bundle1.push`` but only used if the repository is using the
-    *generaldelta* storage format. (default: True)
-
-``bundle1.pull``
-    Whether to allow clients to pull using the legacy bundle1 exchange
-    format. (default: True)
-
-``bundle1gd.pull``
-    Like ``bundle1.pull`` but only used if the repository is using the
-    *generaldelta* storage format. (default: True)
-
-    Large repositories using the *generaldelta* storage format should
-    consider setting this option because converting *generaldelta*
-    repositories to the exchange format required by the bundle1 data
-    format can consume a lot of CPU.
-
-``bundle2.stream``
-    Whether to allow clients to pull using the bundle2 streaming protocol.
-    (default: True)
-
-``zliblevel``
-    Integer between ``-1`` and ``9`` that controls the zlib compression level
-    for wire protocol commands that send zlib compressed output (notably the
-    commands that send repository history data).
-
-    The default (``-1``) uses the default zlib compression level, which is
-    likely equivalent to ``6``. ``0`` means no compression. ``9`` means
-    maximum compression.
-
-    Setting this option allows server operators to make trade-offs between
-    bandwidth and CPU used. Lowering the compression lowers CPU utilization
-    but sends more bytes to clients.
-
-    This option only impacts the HTTP server.
-
-``zstdlevel``
-    Integer between ``1`` and ``22`` that controls the zstd compression level
-    for wire protocol commands. ``1`` is the minimal amount of compression and
-    ``22`` is the highest amount of compression.
-
-    The default (``3``) should be significantly faster than zlib while likely
-    delivering better compression ratios.
-
-    This option only impacts the HTTP server.
-
-    See also ``server.zliblevel``.
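-
-    For example, an operator willing to spend more CPU for better
-    compression might set (values are illustrative)::
-
-      [server]
-      zliblevel = 9
-      zstdlevel = 10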
-
-``view``
-    Repository filter used when exchanging revisions with the peer.
-
-    The default view (``served``) excludes secret and hidden changesets.
-    Another useful value is ``immutable`` (no draft, secret or hidden
-    changesets). (EXPERIMENTAL)
-
-``smtp``
---------
-
-Configuration for extensions that need to send email messages.
-
-``host``
-    Host name of mail server, e.g. "mail.example.com".
-
-``port``
-    Optional. Port to connect to on mail server. (default: 465 if
-    ``tls`` is smtps; 25 otherwise)
-
-``tls``
-    Optional. Method to enable TLS when connecting to mail server: starttls,
-    smtps or none. (default: none)
-
-``username``
-    Optional. User name for authenticating with the SMTP server.
-    (default: None)
-
-``password``
-    Optional. Password for authenticating with the SMTP server. If not
-    specified, interactive sessions will prompt the user for a
-    password; non-interactive sessions will fail. (default: None)
-
-``local_hostname``
-    Optional. The hostname that the sender can use to identify
-    itself to the MTA.
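-
-A hypothetical setup using a submission server with STARTTLS (host and
-user name are placeholders)::
-
-    [smtp]
-    # placeholder values; adjust for your mail server
-    host = mail.example.com
-    port = 587
-    tls = starttls
-    username = alice@example.com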
-
-
-``subpaths``
-------------
-
-Subrepository source URLs can go stale if a remote server changes name
-or becomes temporarily unavailable. This section lets you define
-rewrite rules of the form::
-
-    <pattern> = <replacement>
-
-where ``pattern`` is a regular expression matching a subrepository
-source URL and ``replacement`` is the replacement string used to
-rewrite it. Groups can be matched in ``pattern`` and referenced in
-``replacement``. For instance::
-
-    http://server/(.*)-hg/ = http://hg.server/\1/
-
-rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
-
-Relative subrepository paths are first made absolute, and the
-rewrite rules are then applied on the full (absolute) path. If ``pattern``
-doesn't match the full path, an attempt is made to apply it on the
-relative path alone. The rules are applied in definition order.
-
-``subrepos``
-------------
-
-This section contains options that control the behavior of the
-subrepositories feature. See also :hg:`help subrepos`.
-
-Security note: auditing in Mercurial is known to be insufficient to
-prevent clone-time code execution with carefully constructed Git
-subrepos. It is unknown if a similar defect is present in Subversion
-subrepos. Both Git and Subversion subrepos are disabled by default
-out of security concerns. These subrepo types can be enabled using
-the respective options below.
-
-``allowed``
-    Whether subrepositories are allowed in the working directory.
-
-    When false, commands involving subrepositories (like :hg:`update`)
-    will fail for all subrepository types.
-    (default: true)
-
-``hg:allowed``
-    Whether Mercurial subrepositories are allowed in the working
-    directory. This option only has an effect if ``subrepos.allowed``
-    is true.
-    (default: true)
-
-``git:allowed``
-    Whether Git subrepositories are allowed in the working directory.
-    This option only has an effect if ``subrepos.allowed`` is true.
-
-    See the security note above before enabling Git subrepos.
-    (default: false)
-
-``svn:allowed``
-    Whether Subversion subrepositories are allowed in the working
-    directory. This option only has an effect if ``subrepos.allowed``
-    is true.
-
-    See the security note above before enabling Subversion subrepos.
-    (default: false)
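-
-For example, to additionally allow Git subrepositories while keeping
-Subversion subrepos disabled (see the security note above)::
-
-    [subrepos]
-    allowed = true
-    git:allowed = true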
-
-``templatealias``
------------------
-
-Alias definitions for templates. See :hg:`help templates` for details.
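-
-For example, a pair of illustrative aliases::
-
-    [templatealias]
-    r = rev
-    rn = "{r}:{node|short}"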
-
-``templates``
--------------
-
-Use the ``[templates]`` section to define template strings.
-See :hg:`help templates` for details.
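-
-For example, a hypothetical template that can then be selected with
-``hg log -r . -T nodedate``::
-
-    [templates]
-    nodedate = "{node|short}: {date(date, '%Y-%m-%d')}\n"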
-
-``trusted``
------------
-
-Mercurial will not use the settings in the
-``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
-user or to a trusted group, as various hgrc features allow arbitrary
-commands to be run. This issue is often encountered when configuring
-hooks or extensions for shared repositories or servers. However,
-the web interface will use some safe settings from the ``[web]``
-section.
-
-This section specifies what users and groups are trusted. The
-current user is always trusted. To trust everybody, list a user or a
-group with name ``*``. These settings must be placed in an
-*already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
-user or service running Mercurial.
-
-``users``
-  Comma-separated list of trusted users.
-
-``groups``
-  Comma-separated list of trusted groups.
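-
-For example, in an already-trusted system-wide hgrc (names are
-placeholders)::
-
-    [trusted]
-    # trust the deployment account and everyone in the 'devops' group
-    users = deploy
-    groups = devops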
-
-
-``ui``
-------
-
-User interface controls.
-
-``archivemeta``
-    Whether to include the .hg_archival.txt file containing meta data
-    (hashes for the repository base and for tip) in archives created
-    by the :hg:`archive` command or downloaded via hgweb.
-    (default: True)
-
-``askusername``
-    Whether to prompt for a username when committing. If True, and
-    neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
-    be prompted to enter a username. If no username is entered, the
-    default ``USER@HOST`` is used instead.
-    (default: False)
-
-``clonebundles``
-    Whether the "clone bundles" feature is enabled.
-
-    When enabled, :hg:`clone` may download and apply a server-advertised
-    bundle file from a URL instead of using the normal exchange mechanism.
-
-    This can likely result in faster and more reliable clones.
-
-    (default: True)
-
-``clonebundlefallback``
-    Whether failure to apply an advertised "clone bundle" from a server
-    should result in fallback to a regular clone.
-
-    This is disabled by default because servers advertising "clone
-    bundles" often do so to reduce server load. If advertised bundles
-    start mass failing and clients automatically fall back to a regular
-    clone, this would add significant and unexpected load to the server
-    since the server is expecting clone operations to be offloaded to
-    pre-generated bundles. Failing fast (the default behavior) ensures
-    clients don't overwhelm the server when "clone bundle" application
-    fails.
-
-    (default: False)
-
-``clonebundleprefers``
-    Defines preferences for which "clone bundles" to use.
-
-    Servers advertising "clone bundles" may advertise multiple available
-    bundles. Each bundle may have different attributes, such as the bundle
-    type and compression format. This option is used to prefer a particular
-    bundle over another.
-
-    The following keys are defined by Mercurial:
-
-    BUNDLESPEC
-       A bundle type specifier. These are strings passed to :hg:`bundle -t`.
-       e.g. ``gzip-v2`` or ``bzip2-v1``.
-
-    COMPRESSION
-       The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
-
-    Server operators may define custom keys.
-
-    Example values: ``COMPRESSION=bzip2``,
-    ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
-
-    By default, the first bundle advertised by the server is used.
-
-``color``
-    When to colorize output. Possible values are Boolean ("yes" or "no"),
-    "debug", or "always". (default: "yes") "yes" will use color whenever it
-    seems possible. See :hg:`help color` for details.
-
-``commitsubrepos``
-    Whether to commit modified subrepositories when committing the
-    parent repository. If False and one subrepository has uncommitted
-    changes, abort the commit.
-    (default: False)
-
-``debug``
-    Print debugging information. (default: False)
-
-``editor``
-    The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
-
-``fallbackencoding``
-    Encoding to try if it's not possible to decode the changelog using
-    UTF-8. (default: ISO-8859-1)
-
-``graphnodetemplate``
-    The template used to print changeset nodes in an ASCII revision graph.
-    (default: ``{graphnode}``)
-
-``ignore``
-    A file to read per-user ignore patterns from. This file should be
-    in the same format as a repository-wide .hgignore file. Filenames
-    are relative to the repository root. This option supports hook syntax,
-    so if you want to specify multiple ignore files, you can do so by
-    setting something like ``ignore.other = ~/.hgignore2``. For details
-    of the ignore file format, see the ``hgignore(5)`` man page.
-
-``interactive``
-    Allow prompting the user. (default: True)
-
-``interface``
-    Select the default interface for interactive features (default: text).
-    Possible values are 'text' and 'curses'.
-
-``interface.chunkselector``
-    Select the interface for change recording (e.g. :hg:`commit -i`).
-    Possible values are 'text' and 'curses'.
-    This config overrides the interface specified by ui.interface.
-
-``large-file-limit``
-    Largest file size that gives no memory use warning.
-    Possible values are integers or 0 to disable the check.
-    (default: 10000000)
-
-``logtemplate``
-    Template string for commands that print changesets.
-
-``merge``
-    The conflict resolution program to use during a manual merge.
-    For more information on merge tools see :hg:`help merge-tools`.
-    For configuring merge tools see the ``[merge-tools]`` section.
-
-``mergemarkers``
-    Sets the merge conflict marker label styling. The ``detailed``
-    style uses the ``mergemarkertemplate`` setting to style the labels.
-    The ``basic`` style just uses 'local' and 'other' as the marker label.
-    One of ``basic`` or ``detailed``.
-    (default: ``basic``)
-
-``mergemarkertemplate``
-    The template used to print the commit description next to each conflict
-    marker during merge conflicts. See :hg:`help templates` for the template
-    format.
-
-    Defaults to showing the hash, tags, branches, bookmarks, author, and
-    the first line of the commit description.
-
-    If you use non-ASCII characters in names for tags, branches, bookmarks,
-    authors, and/or commit descriptions, you must pay attention to encodings of
-    managed files. At template expansion, non-ASCII characters use the encoding
-    specified by the ``--encoding`` global option, ``HGENCODING`` or other
-    environment variables that govern your locale. If the encoding of the merge
-    markers is different from the encoding of the merged files,
-    serious problems may occur.
-
-    Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
-
-``message-output``
-    Where to write status and error messages. (default: ``stdio``)
-
-    ``stderr``
-      Everything to stderr.
-    ``stdio``
-      Status to stdout, and error to stderr.
-
-``origbackuppath``
-    The path to a directory used to store generated .orig files. If the path is
-    not a directory, one will be created.  If set, files stored in this
-    directory have the same name as the original file and do not have a .orig
-    suffix.
-
-``paginate``
-  Control the pagination of command output (default: True). See :hg:`help pager`
-  for details.
-
-``patch``
-    An optional external tool that ``hg import`` and some extensions
-    will use for applying patches. By default Mercurial uses an
-    internal patch utility. The external tool must work as the common
-    Unix ``patch`` program. In particular, it must accept a ``-p``
-    argument to strip patch headers, a ``-d`` argument to specify the
-    current directory, a file name to patch, and a patch file to take
-    from stdin.
-
-    It is possible to specify a patch tool together with extra
-    arguments. For example, setting this option to ``patch --merge``
-    will use the ``patch`` program with its 2-way merge option.
-
-``portablefilenames``
-    Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
-    (default: ``warn``)
-
-    ``warn``
-      Print a warning message on POSIX platforms, if a file with a non-portable
-      filename is added (e.g. a file with a name that can't be created on
-      Windows because it contains reserved parts like ``AUX``, reserved
-      characters like ``:``, or would cause a case collision with an existing
-      file).
-
-    ``ignore``
-      Don't print a warning.
-
-    ``abort``
-      The command is aborted.
-
-    ``true``
-      Alias for ``warn``.
-
-    ``false``
-      Alias for ``ignore``.
-
-    .. container:: windows
-
-      On Windows, this configuration option is ignored and the command aborted.
-
-``pre-merge-tool-output-template``
-    A template that is printed before executing an external merge tool. This can
-    be used to print out additional context that might be useful to have during
-    the conflict resolution, such as the description of the various commits
-    involved or bookmarks/tags.
-
-    Additional information is available in the ``local``, ``base``, and
-    ``other`` dicts. For example: ``{local.label}``, ``{base.name}``, or
-    ``{other.islink}``.
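-
-    A minimal illustrative template built from those keywords::
-
-      [ui]
-      pre-merge-tool-output-template = merging {local.label} and {other.label}\n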
-
-``quiet``
-    Reduce the amount of output printed.
-    (default: False)
-
-``relative-paths``
-    Prefer relative paths in the UI.
-
-``remotecmd``
-    Remote command to use for clone/push/pull operations.
-    (default: ``hg``)
-
-``report_untrusted``
-    Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
-    trusted user or group.
-    (default: True)
-
-``slash``
-    (Deprecated. Use ``slashpath`` template filter instead.)
-
-    Display paths using a slash (``/``) as the path separator. This
-    only makes a difference on systems where the default path
-    separator is not the slash character (e.g. Windows uses the
-    backslash character (``\``)).
-    (default: False)
-
-``statuscopies``
-    Display copies in the status command.
-
-``ssh``
-    Command to use for SSH connections. (default: ``ssh``)
-
-``ssherrorhint``
-    A hint shown to the user in the case of SSH error (e.g.
-    ``Please see http://company/internalwiki/ssh.html``)
-
-``strict``
-    Require exact command names, instead of allowing unambiguous
-    abbreviations. (default: False)
-
-``style``
-    Name of style to use for command output.
-
-``supportcontact``
-    A URL where users should report a Mercurial traceback. Use this if you are a
-    large organisation with its own Mercurial deployment process and crash
-    reports should be addressed to your internal support.
-
-``textwidth``
-    Maximum width of help text. A longer line generated by ``hg help`` or
-    ``hg subcommand --help`` will be broken after white space to get this
-    width or the terminal width, whichever comes first.
-    A non-positive value will disable this and the terminal width will be
-    used. (default: 78)
-
-``timeout``
-    The timeout used when a lock is held (in seconds); a negative value
-    means no timeout. (default: 600)
-
-``timeout.warn``
-    Time (in seconds) before a warning is printed about a held lock. A
-    negative value means no warning. (default: 0)
-
-``traceback``
-    Mercurial always prints a traceback when an unknown exception
-    occurs. Setting this to True will make Mercurial print a traceback
-    on all exceptions, even those recognized by Mercurial (such as
-    IOError or MemoryError). (default: False)
-
-``tweakdefaults``
-    By default Mercurial's behavior changes very little from release
-    to release, but over time the recommended config settings
-    shift. Enable this config to opt in to get automatic tweaks to
-    Mercurial's behavior over time. This config setting will have no
-    effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
-    not include ``tweakdefaults``. (default: False)
-
-    It currently means::
-
-      .. tweakdefaultsmarker
-
-``username``
-    The committer of a changeset created when running "commit".
-    Typically a person's name and email address, e.g. ``Fred Widget
-    <fred@example.com>``. Environment variables in the
-    username are expanded.
-
-    (default: ``$EMAIL`` or ``username@hostname``. If the username in
-    hgrc is empty, e.g. if the system admin set ``username =`` in the
-    system hgrc, it has to be specified manually or in a different
-    hgrc file)
-
-``verbose``
-    Increase the amount of output printed. (default: False)
-
-
-``web``
--------
-
-Web interface configuration. The settings in this section apply to
-both the builtin webserver (started by :hg:`serve`) and the script you
-run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
-and WSGI).
-
-The Mercurial webserver does no authentication (it does not prompt for
-usernames and passwords to validate *who* users are), but it does do
-authorization (it grants or denies access for *authenticated users*
-based on settings in this section). You must either configure your
-webserver to do authentication for you, or disable the authorization
-checks.
-
-For a quick setup in a trusted environment, e.g., a private LAN, where
-you want it to accept pushes from anybody, you can use the following
-command line::
-
-    $ hg --config web.allow-push=* --config web.push_ssl=False serve
-
-Note that this will allow anybody to push anything to the server and
-that this should not be used for public servers.
-
-The full set of options is:
-
-``accesslog``
-    Where to output the access log. (default: stdout)
-
-``address``
-    Interface address to bind to. (default: all)
-
-``allow-archive``
-    List of archive formats (bz2, gz, zip) allowed for downloading.
-    (default: empty)
-
-``allowbz2``
-    (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
-    revisions.
-    (default: False)
-
-``allowgz``
-    (DEPRECATED) Whether to allow .tar.gz downloading of repository
-    revisions.
-    (default: False)
-
-``allow-pull``
-    Whether to allow pulling from the repository. (default: True)
-
-``allow-push``
-    Whether to allow pushing to the repository. If empty or not set,
-    pushing is not allowed. If the special value ``*``, any remote
-    user can push, including unauthenticated users. Otherwise, the
-    remote user must have been authenticated, and the authenticated
-    user name must be present in this list. The contents of the
-    allow-push list are examined after the deny_push list.
-
-``allow_read``
-    If the user has not already been denied repository access due to
-    the contents of deny_read, this list determines whether to grant
-    repository access to the user. If this list is not empty, and the
-    user is unauthenticated or not present in the list, then access is
-    denied for the user. If the list is empty or not set, then access
-    is permitted to all users by default. Setting allow_read to the
-    special value ``*`` is equivalent to it not being set (i.e. access
-    is permitted to all users). The contents of the allow_read list are
-    examined after the deny_read list.
-
-``allowzip``
-    (DEPRECATED) Whether to allow .zip downloading of repository
-    revisions. This feature creates temporary files.
-    (default: False)
-
-``archivesubrepos``
-    Whether to recurse into subrepositories when archiving.
-    (default: False)
-
-``baseurl``
-    Base URL to use when publishing URLs in other locations, so
-    third-party tools like email notification hooks can construct
-    URLs. Example: ``http://hgserver/repos/``.
-
-``cacerts``
-    Path to file containing a list of PEM encoded certificate
-    authority certificates. Environment variables and ``~user``
-    constructs are expanded in the filename. If specified on the
-    client, then it will verify the identity of remote HTTPS servers
-    with these certificates.
-
-    To disable SSL verification temporarily, specify ``--insecure`` from
-    command line.
-
-    You can use OpenSSL's CA certificate file if your platform has
-    one. On most Linux systems this will be
-    ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
-    generate this file manually. The form must be as follows::
-
-        -----BEGIN CERTIFICATE-----
-        ... (certificate in base64 PEM encoding) ...
-        -----END CERTIFICATE-----
-        -----BEGIN CERTIFICATE-----
-        ... (certificate in base64 PEM encoding) ...
-        -----END CERTIFICATE-----
-
-``cache``
-    Whether to support caching in hgweb. (default: True)
-
-``certificate``
-    Certificate to use when running :hg:`serve`.
-
-``collapse``
-    With ``descend`` enabled, repositories in subdirectories are shown at
-    a single level alongside repositories in the current path. With
-    ``collapse`` also enabled, repositories residing at a deeper level than
-    the current path are grouped behind navigable directory entries that
-    lead to the locations of these repositories. In effect, this setting
-    collapses each collection of repositories found within a subdirectory
-    into a single entry for that subdirectory. (default: False)
-
-``comparisoncontext``
-    Number of lines of context to show in side-by-side file comparison. If
-    negative or the value ``full``, whole files are shown. (default: 5)
-
-    This setting can be overridden by a ``context`` request parameter to the
-    ``comparison`` command, taking the same values.
-
-``contact``
-    Name or email address of the person in charge of the repository.
-    (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
-
-``csp``
-    Send a ``Content-Security-Policy`` HTTP header with this value.
-
-    The value may contain a special string ``%nonce%``, which will be replaced
-    by a randomly-generated one-time use value. If the value contains
-    ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
-    one-time property of the nonce. This nonce will also be inserted into
-    ``<script>`` elements containing inline JavaScript.
-
-    Note: lots of HTML content sent by the server is derived from repository
-    data. Please consider the potential for malicious repository data to
-    "inject" itself into generated HTML content as part of your security
-    threat model.
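-
-    An illustrative policy permitting only same-origin content and
-    nonce-approved inline scripts::
-
-      [web]
-      csp = default-src 'self'; script-src 'self' 'nonce-%nonce%'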
-
-``deny_push``
-    Whether to deny pushing to the repository. If empty or not set,
-    push is not denied. If the special value ``*``, all remote users are
-    denied push. Otherwise, unauthenticated users are all denied, and
-    any authenticated user name present in this list is also denied. The
-    contents of the deny_push list are examined before the allow-push list.
-
-``deny_read``
-    Whether to deny reading/viewing of the repository. If this list is
-    not empty, unauthenticated users are all denied, and any
-    authenticated user name present in this list is also denied access to
-    the repository. If set to the special value ``*``, all remote users
-    are denied access (rarely needed ;). If deny_read is empty or not set,
-    the determination of repository access depends on the presence and
-    content of the allow_read list (see description). If both
-    deny_read and allow_read are empty or not set, then access is
-    permitted to all users by default. If the repository is being
-    served via hgwebdir, denied users will not be able to see it in
-    the list of repositories. The contents of the deny_read list have
-    priority over (are examined before) the contents of the allow_read
-    list.
-
-``descend``
-    Whether hgwebdir indexes descend into subdirectories. When disabled,
-    only repositories directly in the current path are shown (other
-    repositories are still available from the index corresponding to
-    their containing path).
-
-``description``
-    Textual description of the repository's purpose or contents.
-    (default: "unknown")
-
-``encoding``
-    Character encoding name. (default: the current locale charset)
-    Example: "UTF-8".
-
-``errorlog``
-    Where to output the error log. (default: stderr)
-
-``guessmime``
-    Control MIME types for raw download of file content.
-    Set to True to let hgweb guess the content type from the file
-    extension. This will serve HTML files as ``text/html`` and might
-    allow cross-site scripting attacks when serving untrusted
-    repositories. (default: False)
-
-``hidden``
-    Whether to hide the repository in the hgwebdir index.
-    (default: False)
-
-``ipv6``
-    Whether to use IPv6. (default: False)
-
-``labels``
-    List of string *labels* associated with the repository.
-
-    Labels are exposed as a template keyword and can be used to customize
-    output. e.g. the ``index`` template can group or filter repositories
-    by labels and the ``summary`` template can display additional content
-    if a specific label is present.
-
-``logoimg``
-    File name of the logo image that some templates display on each page.
-    The file name is relative to ``staticurl``. That is, the full path to
-    the logo image is "staticurl/logoimg".
-    If unset, ``hglogo.png`` will be used.
-
-``logourl``
-    Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
-    will be used.
-
-``maxchanges``
-    Maximum number of changes to list on the changelog. (default: 10)
-
-``maxfiles``
-    Maximum number of files to list per changeset. (default: 10)
-
-``maxshortchanges``
-    Maximum number of changes to list on the shortlog, graph or filelog
-    pages. (default: 60)
-
-``name``
-    Repository name to use in the web interface.
-    (default: current working directory)
-
-``port``
-    Port to listen on. (default: 8000)
-
-``prefix``
-    Prefix path to serve from. (default: '' (server root))
-
-``push_ssl``
-    Whether to require that inbound pushes be transported over SSL to
-    prevent password sniffing. (default: True)
-
-``refreshinterval``
-    How frequently directory listings re-scan the filesystem for new
-    repositories, in seconds. This is relevant when wildcards are used
-    to define paths. Depending on how much filesystem traversal is
-    required, refreshing may negatively impact performance.
-
-    Values less than or equal to 0 always refresh.
-    (default: 20)
-
-``server-header``
-    Value for HTTP ``Server`` response header.
-
-``static``
-    Directory where static files are served from.
-
-``staticurl``
-    Base URL to use for static files. If unset, static files (e.g. the
-    hgicon.png favicon) will be served by the CGI script itself. Use
-    this setting to serve them directly with the HTTP server.
-    Example: ``http://hgserver/static/``.
-
-``stripes``
-    How many lines a "zebra stripe" should span in multi-line output.
-    Set to 0 to disable. (default: 1)
-
-``style``
-    Which template map style to use. The available options are the names of
-    subdirectories in the HTML templates path. (default: ``paper``)
-    Example: ``monoblue``.
-
-``templates``
-    Where to find the HTML templates. The default path to the HTML templates
-    can be obtained from ``hg debuginstall``.
-
-``websub``
-----------
-
-Web substitution filter definition. You can use this section to
-define a set of regular expression substitution patterns which
-let you automatically modify the hgweb server output.
-
-The default hgweb templates only apply these substitution patterns
-on the revision description fields. You can apply them anywhere
-you want when you create your own templates by adding calls to the
-"websub" filter (usually after calling the "escape" filter).
-
-This can be used, for example, to convert issue references to links
-to your issue tracker, or to convert "markdown-like" syntax into
-HTML (see the examples below).
-
-Each entry in this section names a substitution filter.
-The value of each entry defines the substitution expression itself.
-The websub expressions follow the old interhg extension syntax,
-which in turn imitates the Unix sed replacement syntax::
-
-    patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
-
-You can use any separator other than "/". The final "i" is optional
-and indicates that the search must be case insensitive.
-
-Examples::
-
-    [websub]
-    issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
-    italic = s/\b_(\S+)_\b/<i>\1<\/i>/
-    bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
-
-``worker``
-----------
-
-Parallel master/worker configuration. We currently perform working
-directory updates in parallel on Unix-like systems, which greatly
-helps performance.
-
-``enabled``
-    Whether to enable the use of worker processes.
-    (default: true)
-
-``numcpus``
-    Number of CPUs to use for parallel operations. A zero or
-    negative value is treated as ``use the default``.
-    (default: 4 or the number of CPUs on the system, whichever is larger)
-
-``backgroundclose``
-    Whether to enable closing file handles on background threads during certain
-    operations. Some platforms aren't very efficient at closing file
-    handles that have been written or appended to. By performing file closing
-    on background threads, file write rate can increase substantially.
-    (default: true on Windows, false elsewhere)
-
-``backgroundcloseminfilecount``
-    Minimum number of files required to trigger background file closing.
-    Operations not writing this many files won't start background close
-    threads.
-    (default: 2048)
-
-``backgroundclosemaxqueue``
-    The maximum number of opened file handles waiting to be closed in the
-    background. This option only has an effect if ``backgroundclose`` is
-    enabled.
-    (default: 384)
-
-``backgroundclosethreadcount``
-    Number of threads to process background file closes. Only relevant if
-    ``backgroundclose`` is enabled.
-    (default: 4)
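-
-For example, an illustrative override for a machine where the default
-worker count is too aggressive::
-
-    [worker]
-    numcpus = 2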
--- a/mercurial/help/dates.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-Some commands allow the user to specify a date, e.g.:
-
-- backout, commit, import, tag: Specify the commit date.
-- log, revert, update: Select revision(s) by date.
-
-Many date formats are valid. Here are some examples:
-
-- ``Wed Dec 6 13:18:29 2006`` (local timezone assumed)
-- ``Dec 6 13:18 -0600`` (year assumed, time offset provided)
-- ``Dec 6 13:18 UTC`` (UTC and GMT are aliases for +0000)
-- ``Dec 6`` (midnight)
-- ``13:18`` (today assumed)
-- ``3:39`` (3:39AM assumed)
-- ``3:39pm`` (15:39)
-- ``2006-12-06 13:18:29`` (ISO 8601 format)
-- ``2006-12-6 13:18``
-- ``2006-12-6``
-- ``12-6``
-- ``12/6``
-- ``12/6/6`` (Dec 6 2006)
-- ``today`` (midnight)
-- ``yesterday`` (midnight)
-- ``now`` - right now
-
-Lastly, there is Mercurial's internal format:
-
-- ``1165411109 0`` (Wed Dec 6 13:18:29 2006 UTC)
-
-This is the internal representation format for dates. The first number
-is the number of seconds since the epoch (1970-01-01 00:00 UTC). The
-second is the offset of the local timezone, in seconds west of UTC
-(negative if the timezone is east of UTC).
-
-The log command also accepts date ranges:
-
-- ``<DATE`` - at or before a given date/time
-- ``>DATE`` - on or after a given date/time
-- ``DATE to DATE`` - a date range, inclusive
-- ``-DAYS`` - within a given number of days of today
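-
-For example, a hypothetical query for the last week's changesets::
-
-    hg log -d "-7"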
--- a/mercurial/help/deprecated.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-Mercurial evolves over time; some features, options, and commands may be
-replaced by better and more secure alternatives. This topic will help you
-migrate your existing usage and/or configuration to newer features.
-
-Commands
-========
-
-The following commands are still available but their use is not recommended:
-
-``locate``
-
-This command has been replaced by `hg files`.
-
-``parents``
-
-This command can be replaced by `hg summary` or `hg log` with appropriate
-revsets. See `hg help revsets` for more information.
-
-``tip``
-
-The recommended alternative is `hg heads`.
-
-Options
-=======
-
-``web.allowpull``
-    Renamed to `allow-pull`.
-
-``web.allow_push``
-    Renamed to `allow-push`.
--- a/mercurial/help/diffs.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-Mercurial's default format for showing changes between two versions of
-a file is compatible with the unified format of GNU diff, which can be
-used by GNU patch and many other standard tools.
-
-While this standard format is often enough, it does not encode the
-following information:
-
-- executable status and other permission bits
-- copy or rename information
-- changes in binary files
-- creation or deletion of empty files
-
-Mercurial also supports the extended diff format from the git VCS
-which addresses these limitations. The git diff format is not produced
-by default because a few widespread tools still do not understand this
-format.
-
-This means that when generating diffs from a Mercurial repository
-(e.g. with :hg:`export`), you should be careful about things like file
-copies and renames or other things mentioned above, because when
-applying a standard diff to a different repository, this extra
-information is lost. Mercurial's internal operations (like push and
-pull) are not affected by this, because they use an internal binary
-format for communicating changes.
-
-To make Mercurial produce the git extended diff format, use the --git
-option available for many commands, or set 'git = True' in the [diff]
-section of your configuration file. You do not need to set this option
-when importing diffs in this format or using them in the mq extension.
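-
-For example, a hypothetical configuration enabling it permanently::
-
-    [diff]
-    git = True
-
-or, for a single command, ``hg export --git -r .``.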
--- a/mercurial/help/environment.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-HG
-    Path to the 'hg' executable, automatically passed when running
-    hooks, extensions or external tools. If unset or empty, this is
-    the hg executable's name if it's frozen; otherwise, an executable
-    named 'hg' (with %PATHEXT% [defaulting to COM/EXE/BAT/CMD]
-    extensions on Windows) is searched for.
-
-HGEDITOR
-    This is the name of the editor to run when committing. See EDITOR.
-
-    (deprecated, see :hg:`help config.ui.editor`)
-
-HGENCODING
-    This overrides the default locale setting detected by Mercurial.
-    This setting is used to convert data including usernames,
-    changeset descriptions, tag names, and branches. This setting can
-    be overridden with the --encoding command-line option.
-
-HGENCODINGMODE
-    This sets Mercurial's behavior for handling unknown characters
-    while transcoding user input. The default is "strict", which
-    causes Mercurial to abort if it can't map a character. Other
-    settings include "replace", which replaces unknown characters, and
-    "ignore", which drops them. This setting can be overridden with
-    the --encodingmode command-line option.
-
-HGENCODINGAMBIGUOUS
-    This sets Mercurial's behavior for handling characters with
-    "ambiguous" widths like accented Latin characters with East Asian
-    fonts. By default, Mercurial assumes ambiguous characters are
-    narrow. Set this variable to "wide" if such characters cause
-    formatting problems.
-
-HGMERGE
-    An executable to use for resolving merge conflicts. The program
-    will be executed with three arguments: local file, remote file,
-    ancestor file.
-
-    (deprecated, see :hg:`help config.ui.merge`)
-
-HGRCPATH
-    A list of files or directories to search for configuration
-    files. The item separator is ":" on Unix, ";" on Windows. If HGRCPATH
-    is not set, the platform default search path is used. If empty, only
-    the .hg/hgrc from the current repository is read.
-
-    For each element in HGRCPATH:
-
-    - if it's a directory, all files ending with .rc are added
-    - otherwise, the file itself will be added
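-
-    For example (paths are placeholders)::
-
-      HGRCPATH=/etc/mercurial:$HOME/.hgrc hg version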
-
-HGPLAIN
-    When set, this disables any configuration settings that might
-    change Mercurial's default output. This includes encoding,
-    defaults, verbose mode, debug mode, quiet mode, tracebacks, and
-    localization. This can be useful when scripting against Mercurial
-    in the face of existing user configuration.
-
-    In addition to the features disabled by ``HGPLAIN=``, the following
-    values can be specified to adjust behavior:
-
-    ``+strictflags``
-        Restrict parsing of command line flags.
-
-    Equivalent options set via command line flags or environment
-    variables are not overridden.
-
-    See :hg:`help scripting` for details.
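-
-    For example, a script that wants stable, locale-independent output
-    might run (illustrative)::
-
-      HGPLAIN=1 hg log -r . -T '{node}\n'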
-
-HGPLAINEXCEPT
-    This is a comma-separated list of features to preserve when
-    HGPLAIN is enabled. Currently the following values are supported:
-
-    ``alias``
-        Don't remove aliases.
-    ``color``
-        Don't disable colored output.
-    ``i18n``
-        Preserve internationalization.
-    ``revsetalias``
-        Don't remove revset aliases.
-    ``templatealias``
-        Don't remove template aliases.
-    ``progress``
-        Don't hide progress output.
-
-    Setting HGPLAINEXCEPT to anything (even an empty string) will
-    enable plain mode.
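-
-    For example (illustrative)::
-
-      HGPLAINEXCEPT=alias,i18n hg log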
-
-HGUSER
-    This is the string used as the author of a commit. If not set,
-    available values will be considered in this order:
-
-    - HGUSER (deprecated)
-    - configuration files from the HGRCPATH
-    - EMAIL
-    - interactive prompt
-    - LOGNAME (with ``@hostname`` appended)
-
-    (deprecated, see :hg:`help config.ui.username`)
-
-EMAIL
-    May be used as the author of a commit; see HGUSER.
-
-LOGNAME
-    May be used as the author of a commit; see HGUSER.
-
-VISUAL
-    This is the name of the editor to use when committing. See EDITOR.
-
-EDITOR
-    Sometimes Mercurial needs to open a text file in an editor for a
-    user to modify, for example when writing commit messages. The
-    editor it uses is determined by looking at the environment
-    variables HGEDITOR, VISUAL and EDITOR, in that order. The first
-    non-empty one is chosen. If all of them are empty, the editor
-    defaults to 'vi'.
-
-PYTHONPATH
-    This is used by Python to find imported modules and may need to be
-    set appropriately if this Mercurial is not installed system-wide.
--- a/mercurial/help/extensions.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-Mercurial has the ability to add new features through the use of
-extensions. Extensions may add new commands, add options to
-existing commands, change the default behavior of commands, or
-implement hooks.
-
-To enable the "foo" extension, either shipped with Mercurial or in the
-Python search path, create an entry for it in your configuration file,
-like this::
-
-  [extensions]
-  foo =
-
-You may also specify the full path to an extension::
-
-  [extensions]
-  myfeature = ~/.hgext/myfeature.py
-
-See :hg:`help config` for more information on configuration files.
-
-Extensions are not loaded by default for a variety of reasons:
-they can increase startup overhead; they may be meant for advanced
-usage only; they may provide potentially dangerous abilities (such
-as letting you destroy or modify history); they might not be ready
-for prime time; or they may alter some usual behaviors of stock
-Mercurial. It is thus up to the user to activate extensions as
-needed.
-
-To explicitly disable an extension enabled in a configuration file of
-broader scope, prepend its path with !::
-
-  [extensions]
-  # disabling extension bar residing in /path/to/extension/bar.py
-  bar = !/path/to/extension/bar.py
-  # ditto, but no path was supplied for extension baz
-  baz = !
--- a/mercurial/help/filesets.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,79 +0,0 @@
-Mercurial supports a functional language for selecting a set of
-files.
-
-Like other file patterns, this pattern type is indicated by a prefix,
-'set:'. The language supports a number of predicates which are joined
-by infix operators. Parentheses can be used for grouping.
-
-Identifiers such as filenames or patterns must be quoted with single
-or double quotes if they contain characters outside of
-``[.*{}[]?/\_a-zA-Z0-9\x80-\xff]`` or if they match one of the
-predefined predicates. This generally applies to file patterns other
-than globs and arguments for predicates. Pattern prefixes such as
-``path:`` may be specified without quoting.
-
-Special characters can be used in quoted identifiers by escaping them,
-e.g., ``\n`` is interpreted as a newline. To prevent them from being
-interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``.
-
-See also :hg:`help patterns`.
-
-Operators
-=========
-
-There is a single prefix operator:
-
-``not x``
-  Files not in x. Short form is ``! x``.
-
-These are the supported infix operators:
-
-``x and y``
-  The intersection of files in x and y. Short form is ``x & y``.
-
-``x or y``
-  The union of files in x and y. There are two alternative short
-  forms: ``x | y`` and ``x + y``.
-
-``x - y``
-  Files in x but not in y.
-
-Predicates
-==========
-
-The following predicates are supported:
-
-.. predicatesmarker
-
-Examples
-========
-
-Some sample queries:
-
-- Show status of files that appear to be binary in the working directory::
-
-    hg status -A "set:binary()"
-
-- Forget files that are in .hgignore but are already tracked::
-
-    hg forget "set:hgignore() and not ignored()"
-
-- Find text files that contain a string::
-
-    hg files "set:grep(magic) and not binary()"
-
-- Find C files in a non-standard encoding::
-
-    hg files "set:**.c and not encoding('UTF-8')"
-
-- Revert copies of large binary files::
-
-    hg revert "set:copied() and binary() and size('>1M')"
-
-- Revert files that were added to the working directory::
-
-    hg revert "set:revs('wdir()', added())"
-
-- Remove files listed in foo.lst that contain the letter a or b::
-
-    hg remove "set: listfile:foo.lst and (**a* or **b*)"
--- a/mercurial/help/flags.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,104 +0,0 @@
-Most Mercurial commands accept various flags.
-
-Flag names
-==========
-
-Flags for each command are listed in :hg:`help` for that command.
-Additionally, some flags, such as --repository, are global and can be used with
-any command - those are seen in :hg:`help -v`, and can be specified before or
-after the command.
-
-Every flag has at least a long name, such as --repository. Some flags may also
-have a short one-letter name, such as the equivalent -R. Using the short or long
-name is equivalent and has the same effect.
-
-Flags that have a short name can also be bundled together - for instance, to
-specify both --edit (short -e) and --interactive (short -i), one could use::
-
-    hg commit -ei
-
-If any of the bundled flags takes a value (i.e. is not a boolean), it must be
-last, followed by the value::
-
-    hg commit -im 'Message'
-
-Flag types
-==========
-
-Mercurial command-line flags can be strings, numbers, booleans, or lists of
-strings.
-
-Specifying flag values
-======================
-
-The following syntaxes are allowed, assuming a flag 'flagname' with short name
-'f'::
-
-    --flagname=foo
-    --flagname foo
-    -f foo
-    -ffoo
-
-This syntax applies to all non-boolean flags (strings, numbers or lists).
-
-Specifying boolean flags
-========================
-
-Boolean flags do not take a value parameter. To specify a boolean, use the flag
-name to set it to true, or the same name prefixed with 'no-' to set it to
-false::
-
-    hg commit --interactive
-    hg commit --no-interactive
-
-Specifying list flags
-=====================
-
-List flags take multiple values. To specify them, pass the flag multiple times::
-
-    hg files --include mercurial --include tests
-
-Setting flag defaults
-=====================
-
-In order to set a default value for a flag in an hgrc file, it is recommended to
-use aliases::
-
-    [alias]
-    commit = commit --interactive
-
-For more information on hgrc files, see :hg:`help config`.
-
-Overriding flags on the command line
-====================================
-
-If the same non-list flag is specified multiple times on the command line, the
-latest specification is used::
-
-    hg commit -m "Ignored value" -m "Used value"
-
-This includes the use of aliases - e.g., if one has::
-
-    [alias]
-    committemp = commit -m "Ignored value"
-
-then the following command will override that -m::
-
-    hg committemp -m "Used value"
-
-Overriding flag defaults
-========================
-
-Every flag has a default value, and you may also set your own defaults in hgrc
-as described above.
-Except for list flags, defaults can be overridden on the command line simply by
-specifying the flag in that location.
-
-Hidden flags
-============
-
-Some flags are not shown in a command's help by default - specifically, those
-that are deemed to be experimental, deprecated or advanced. To show all flags,
-add the --verbose flag for the help command::
-
-    hg help --verbose commit
--- a/mercurial/help/glossary.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,413 +0,0 @@
-Ancestor
-    Any changeset that can be reached by an unbroken chain of parent
-    changesets from a given changeset. More precisely, the ancestors
-    of a changeset can be defined by two properties: a parent of a
-    changeset is an ancestor, and a parent of an ancestor is an
-    ancestor. See also: 'Descendant'.
-
-Bookmark
-    Bookmarks are pointers to certain commits that move when
-    committing. They are similar to tags in that it is possible to use
-    bookmark names in all places where Mercurial expects a changeset
-    ID, e.g., with :hg:`update`. Unlike tags, bookmarks move along
-    when you make a commit.
-
-    Bookmarks can be renamed, copied and deleted. Bookmarks are local,
-    unless they are explicitly pushed or pulled between repositories.
-    Pushing and pulling bookmarks allow you to collaborate with others
-    on a branch without creating a named branch.
-
-Branch
-    (Noun) A child changeset that has been created from a parent that
-    is not a head. These are known as topological branches, see
-    'Branch, topological'. If a topological branch is named, it becomes
-    a named branch. If a topological branch is not named, it becomes
-    an anonymous branch. See 'Branch, anonymous' and 'Branch, named'.
-
-    Branches may be created when changes are pulled from or pushed to
-    a remote repository, since new heads may be created by these
-    operations. Note that the term branch can also be used informally
-    to describe a development process in which certain development is
-    done independently of other development. This is sometimes done
-    explicitly with a named branch, but it can also be done locally,
-    using bookmarks or clones and anonymous branches.
-
-    Example: "The experimental branch."
-
-    (Verb) The action of creating a child changeset which results in
-    its parent having more than one child.
-
-    Example: "I'm going to branch at X."
-
-Branch, anonymous
-    Every time a new child changeset is created from a parent that is not
-    a head and the name of the branch is not changed, a new anonymous
-    branch is created.
-
-Branch, closed
-    A named branch whose branch heads have all been closed.
-
-Branch, default
-    The branch assigned to a changeset when no name has previously been
-    assigned.
-
-Branch head
-    See 'Head, branch'.
-
-Branch, inactive
-    If a named branch has no topological heads, it is considered to be
-    inactive. As an example, a feature branch becomes inactive when it
-    is merged into the default branch. The :hg:`branches` command
-    shows inactive branches by default, though they can be hidden with
-    :hg:`branches --active`.
-
-    NOTE: this concept is deprecated because it is too implicit.
-    Branches should now be explicitly closed using :hg:`commit
-    --close-branch` when they are no longer needed.
-
-Branch, named
-    A collection of changesets which have the same branch name. By
-    default, children of a changeset in a named branch belong to the
-    same named branch. A child can be explicitly assigned to a
-    different branch. See :hg:`help branch`, :hg:`help branches` and
-    :hg:`commit --close-branch` for more information on managing
-    branches.
-
-    Named branches can be thought of as a kind of namespace, dividing
-    the collection of changesets that comprise the repository into a
-    collection of disjoint subsets. A named branch is not necessarily
-    a topological branch. If a new named branch is created from the
-    head of another named branch, or the default branch, but no
-    further changesets are added to that previous branch, then that
-    previous branch will be a branch in name only.
-
-Branch tip
-    See 'Tip, branch'.
-
-Branch, topological
-    Every time a new child changeset is created from a parent that is
-    not a head, a new topological branch is created. If a topological
-    branch is named, it becomes a named branch. If a topological
-    branch is not named, it becomes an anonymous branch of the
-    current, possibly default, branch.
-
-Changelog
-    A record of the changesets in the order in which they were added
-    to the repository. This includes details such as changeset id,
-    author, commit message, date, and list of changed files.
-
-Changeset
-    A snapshot of the state of the repository used to record a change.
-
-Changeset, child
-    The converse of parent changeset: if P is a parent of C, then C is
-    a child of P. There is no limit to the number of children that a
-    changeset may have.
-
-Changeset id
-    A SHA-1 hash that uniquely identifies a changeset. It may be
-    represented as either a "long" 40 hexadecimal digit string, or a
-    "short" 12 hexadecimal digit string.
-
-Changeset, merge
-    A changeset with two parents. This occurs when a merge is
-    committed.
-
-Changeset, parent
-    A revision upon which a child changeset is based. Specifically, a
-    parent changeset of a changeset C is a changeset whose node
-    immediately precedes C in the DAG. Changesets have at most two
-    parents.
-
-Checkout
-    (Noun) The working directory being updated to a specific
-    revision. This use should probably be avoided where possible, as
-    changeset is much more appropriate than checkout in this context.
-
-    Example: "I'm using checkout X."
-
-    (Verb) Updating the working directory to a specific changeset. See
-    :hg:`help update`.
-
-    Example: "I'm going to check out changeset X."
-
-Child changeset
-    See 'Changeset, child'.
-
-Clone
-    (Noun) An entire or partial copy of a repository. The partial
-    clone must be in the form of a revision and its ancestors.
-
-    Example: "Is your clone up to date?"
-
-    (Verb) The process of creating a clone, using :hg:`clone`.
-
-    Example: "I'm going to clone the repository."
-
-Close changeset
-    See 'Head, closed branch'.
-
-Closed branch
-    See 'Branch, closed'.
-
-Closed branch head
-    See 'Head, closed branch'.
-
-Commit
-    (Noun) A synonym for changeset.
-
-    Example: "Is the bug fixed in your recent commit?"
-
-    (Verb) The act of recording changes to a repository. When files
-    are committed in a working directory, Mercurial finds the
-    differences between the committed files and their parent
-    changeset, creating a new changeset in the repository.
-
-    Example: "You should commit those changes now."
-
-Cset
-    A common abbreviation of the term changeset.
-
-DAG
-    The repository of changesets of a distributed version control
-    system (DVCS) can be described as a directed acyclic graph (DAG),
-    consisting of nodes and edges, where nodes correspond to
-    changesets and edges imply a parent -> child relation. This graph
-    can be visualized by graphical tools such as :hg:`log --graph`. In
-    Mercurial, the DAG is limited by the requirement for children to
-    have at most two parents.
-
-Default branch
-    See 'Branch, default'.
-
-Deprecated
-    Feature removed from documentation, but not scheduled for removal.
-
-Descendant
-    Any changeset that can be reached by a chain of child changesets
-    from a given changeset. More precisely, the descendants of a
-    changeset can be defined by two properties: the child of a
-    changeset is a descendant, and the child of a descendant is a
-    descendant. See also: 'Ancestor'.
-
-Diff
-    (Noun) The difference between the contents and attributes of files
-    in two changesets or a changeset and the current working
-    directory. The difference is usually represented in a standard
-    form called a "diff" or "patch". The "git diff" format is used
-    when the changes include copies, renames, or changes to file
-    attributes, none of which can be represented/handled by classic
-    "diff" and "patch".
-
-    Example: "Did you see my correction in the diff?"
-
-    (Verb) Diffing two changesets is the action of creating a diff or
-    patch.
-
-    Example: "If you diff with changeset X, you will see what I mean."
-
-Directory, working
-    The working directory represents the state of the files tracked by
-    Mercurial that will be recorded in the next commit. The working
-    directory initially corresponds to the snapshot at an existing
-    changeset, known as the parent of the working directory. See
-    'Parent, working directory'. The state may be modified by changes
-    to the files introduced manually or by a merge. The repository
-    metadata exists in the .hg directory inside the working directory.
-
-Draft
-    Changesets in the draft phase have not been shared with publishing
-    repositories and may thus be safely changed by history-modifying
-    extensions. See :hg:`help phases`.
-
-Experimental
-    Feature that may change or be removed at a later date.
-
-Graph
-    See DAG and :hg:`log --graph`.
-
-Head
-    The term 'head' may be used to refer to both a branch head or a
-    repository head, depending on the context. See 'Head, branch' and
-    'Head, repository' for specific definitions.
-
-    Heads are where development generally takes place and are the
-    usual targets for update and merge operations.
-
-Head, branch
-    A changeset with no descendants on the same named branch.
-
-Head, closed branch
-    A changeset that marks a head as no longer interesting. The closed
-    head is no longer listed by :hg:`heads`. A branch is considered
-    closed when all its heads are closed and consequently is not
-    listed by :hg:`branches`.
-
-    Closed heads can be re-opened by committing a new changeset as the
-    child of the changeset that marks a head as closed.
-
-Head, repository
-    A topological head which has not been closed.
-
-Head, topological
-    A changeset with no children in the repository.
-
-History, immutable
-    Once committed, changesets cannot be altered.  Extensions which
-    appear to change history actually create new changesets that
-    replace existing ones, and then destroy the old changesets. Doing
-    so in public repositories can result in old changesets being
-    reintroduced to the repository.
-
-History, rewriting
-    The changesets in a repository are immutable. However, extensions
-    to Mercurial can be used to alter the repository, usually in such
-    a way as to preserve changeset contents.
-
-Immutable history
-    See 'History, immutable'.
-
-Manifest
-    Each changeset has a manifest, which is the list of files that are
-    tracked by the changeset.
-
-Merge
-    Used to bring together divergent branches of work. When you update
-    to a changeset and then merge another changeset, you bring the
-    history of the latter changeset into your working directory. Once
-    conflicts are resolved (and marked), this merge may be committed
-    as a merge changeset, bringing two branches together in the DAG.
-
-Merge changeset
-    See 'Changeset, merge'.
-
-Named branch
-    See 'Branch, named'.
-
-Null changeset
-    The empty changeset. It is the parent state of newly-initialized
-    repositories and repositories with no checked out revision. It is
-    thus the parent of root changesets and the effective ancestor when
-    merging unrelated changesets. Can be specified by the alias 'null'
-    or by the changeset ID '000000000000'.
-
-Parent
-    See 'Changeset, parent'.
-
-Parent changeset
-    See 'Changeset, parent'.
-
-Parent, working directory
-    The working directory parent reflects a virtual revision which is
-    the child of the changeset (or two changesets with an uncommitted
-    merge) shown by :hg:`parents`. This is changed with
-    :hg:`update`. Other commands to see the working directory parent
-    are :hg:`summary` and :hg:`id`. Can be specified by the alias ".".
-
-Patch
-    (Noun) The product of a diff operation.
-
-    Example: "I've sent you my patch."
-
-    (Verb) The process of using a patch file to transform one
-    changeset into another.
-
-    Example: "You will need to patch that revision."
-
-Phase
-    A per-changeset state tracking how the changeset has been or
-    should be shared. See :hg:`help phases`.
-
-Public
-    Changesets in the public phase have been shared with publishing
-    repositories and are therefore considered immutable. See :hg:`help
-    phases`.
-
-Pull
-    An operation in which changesets in a remote repository which are
-    not in the local repository are brought into the local
-    repository. Note that this operation without special arguments
-    only updates the repository, it does not update the files in the
-    working directory. See :hg:`help pull`.
-
-Push
-    An operation in which changesets in a local repository which are
-    not in a remote repository are sent to the remote repository. Note
-    that this operation only adds changesets which have been committed
-    locally to the remote repository. Uncommitted changes are not
-    sent. See :hg:`help push`.
-
-Repository
-    The metadata describing all recorded states of a collection of
-    files. Each recorded state is represented by a changeset. A
-    repository is usually (but not always) found in the ``.hg``
-    subdirectory of a working directory. Any recorded state can be
-    recreated by "updating" a working directory to a specific
-    changeset.
-
-Repository head
-    See 'Head, repository'.
-
-Revision
-    A state of the repository at some point in time. The working
-    directory can be updated to an earlier revision by using
-    :hg:`update`. See also 'Revision number' and 'Changeset'.
-
-Revision number
-    This integer uniquely identifies a changeset in a specific
-    repository. It represents the order in which changesets were added
-    to a repository, starting with revision number 0. Note that the
-    revision number may be different in each clone of a repository. To
-    identify changesets uniquely between different clones, see
-    'Changeset id'.
-
-Revlog
-    History storage mechanism used by Mercurial. It is a form of delta
-    encoding, with occasional full revision of data followed by delta
-    of each successive revision. It includes data and an index
-    pointing to the data.
-
-Rewriting history
-    See 'History, rewriting'.
-
-Root
-    A changeset that has only the null changeset as its parent. Most
-    repositories have only a single root changeset.
-
-Secret
-    Changesets in the secret phase may not be shared via push, pull,
-    or clone. See :hg:`help phases`.
-
-Tag
-    An alternative name given to a changeset. Tags can be used in all
-    places where Mercurial expects a changeset ID, e.g., with
-    :hg:`update`. The creation of a tag is stored in the history and
-    will thus automatically be shared with others using push and pull.
-
-Tip
-    The changeset with the highest revision number. It is the changeset
-    most recently added in a repository.
-
-Tip, branch
-    The head of a given branch with the highest revision number. When
-    a branch name is used as a revision identifier, it refers to the
-    branch tip. See also 'Branch, head'. Note that because revision
-    numbers may be different in different repository clones, the
-    branch tip may be different in different cloned repositories.
-
-Update
-    (Noun) Another synonym of changeset.
-
-    Example: "I've pushed an update."
-
-    (Verb) This term is usually used to describe updating the state of
-    the working directory to that of a specific changeset. See
-    :hg:`help update`.
-
-    Example: "You should update."
-
-Working directory
-    See 'Directory, working'.
-
-Working directory parent
-    See 'Parent, working directory'.
--- a/mercurial/help/hg-ssh.8.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,71 +0,0 @@
-========
- hg-ssh
-========
-
-----------------------------------------
-restricted ssh login shell for Mercurial
-----------------------------------------
-
-:Author:         Thomas Arendsen Hein <thomas@intevation.de>
-:Organization:   Mercurial
-:Manual section: 8
-:Manual group:   Mercurial Manual
-
-.. contents::
-   :backlinks: top
-   :class: htmlonly
-   :depth: 1
-
-Synopsis
-""""""""
-**hg-ssh** repositories...
-
-Description
-"""""""""""
-**hg-ssh** is a wrapper for ssh access to a limited set of Mercurial repositories.
-
-To be used in ~/.ssh/authorized_keys with the "command" option, see sshd(8):
-command="hg-ssh path/to/repo1 /path/to/repo2 ~/repo3 ~user/repo4" ssh-dss ...
-(probably together with these other useful options:
-no-port-forwarding,no-X11-forwarding,no-agent-forwarding)
-
-This allows pull/push over ssh from/to the repositories given as arguments.
-
-If all your repositories are subdirectories of a common directory, you can
-allow shorter paths with:
-command="cd path/to/my/repositories && hg-ssh repo1 subdir/repo2"
-
-You can use pattern matching of your normal shell, e.g.:
-command="cd repos && hg-ssh user/thomas/* projects/{mercurial,foo}"
-
-You can also add a --read-only flag to allow read-only access to a key, e.g.:
-command="hg-ssh --read-only repos/\*"
-
-Bugs
-""""
-Probably lots; please post them to the mailing list (see Resources_
-below) when you find them.
-
-See Also
-""""""""
-|hg(1)|_
-
-Author
-""""""
-Written by Matt Mackall <mpm@selenic.com>
-
-Resources
-"""""""""
-Main Web Site: https://mercurial-scm.org/
-
-Source code repository: https://www.mercurial-scm.org/repo/hg
-
-Mailing list: https://www.mercurial-scm.org/mailman/listinfo/mercurial/
-
-Copying
-"""""""
-Copyright (C) 2005-2016 Matt Mackall.
-Free use of this software is granted under the terms of the GNU General
-Public License version 2 or any later version.
-
-.. include:: common.txt
--- a/mercurial/help/hg.1.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,119 +0,0 @@
-====
- hg
-====
-
----------------------------------------
-Mercurial source code management system
----------------------------------------
-
-:Author:         Matt Mackall <mpm@selenic.com>
-:Organization:   Mercurial
-:Manual section: 1
-:Manual group:   Mercurial Manual
-
-.. contents::
-   :backlinks: top
-   :class: htmlonly
-   :depth: 1
-
-
-Synopsis
-""""""""
-**hg** *command* [*option*]... [*argument*]...
-
-Description
-"""""""""""
-The **hg** command provides a command line interface to the Mercurial
-system.
-
-Command Elements
-""""""""""""""""
-
-files...
-    indicates one or more filenames or relative paths; see
-    `File Name Patterns`_ for information on pattern matching
-
-path
-    indicates a path on the local machine
-
-revision
-    indicates a changeset which can be specified as a changeset
-    revision number, a tag, or a unique substring of the changeset
-    hash value
-
-repository path
-    either the pathname of a local repository or the URI of a remote
-    repository.
-
-.. include:: hg.1.gendoc.txt
-
-Files
-"""""
-
-``/etc/mercurial/hgrc``, ``$HOME/.hgrc``, ``.hg/hgrc``
-    This file contains defaults and configuration. Values in
-    ``.hg/hgrc`` override those in ``$HOME/.hgrc``, and these override
-    settings made in the global ``/etc/mercurial/hgrc`` configuration.
-    See |hgrc(5)|_ for details of the contents and format of these
-    files.
-
-``.hgignore``
-    This file contains regular expressions (one per line) that
-    describe file names that should be ignored by **hg**. For details,
-    see |hgignore(5)|_.
-
-``.hgsub``
-    This file defines the locations of all subrepositories, and
-    tells where the subrepository checkouts came from. For details, see
-    :hg:`help subrepos`.
-
-``.hgsubstate``
-    This file is where Mercurial stores all nested repository states. *NB: This
-    file should not be edited manually.*
-
-``.hgtags``
-    This file contains changeset hash values and text tag names (one
-    of each separated by spaces) that correspond to tagged versions of
-    the repository contents. The file content is encoded using UTF-8.
-
-``.hg/last-message.txt``
-    This file is used by :hg:`commit` to store a backup of the commit message
-    in case the commit fails.
-
-``.hg/localtags``
-    This file can be used to define local tags which are not shared among
-    repositories. The file format is the same as for ``.hgtags``, but it is
-    encoded using the local system encoding.
-
-Some commands (e.g. revert) produce backup files ending in ``.orig``;
-if the ``.orig`` file already exists and is not tracked by Mercurial,
-it will be overwritten.
-
-Bugs
-""""
-Probably lots; please post them to the mailing list (see Resources_
-below) when you find them.
-
-See Also
-""""""""
-|hgignore(5)|_, |hgrc(5)|_
-
-Author
-""""""
-Written by Matt Mackall <mpm@selenic.com>
-
-Resources
-"""""""""
-Main Web Site: https://mercurial-scm.org/
-
-Source code repository: https://www.mercurial-scm.org/repo/hg
-
-Mailing list: https://www.mercurial-scm.org/mailman/listinfo/mercurial/
-
-Copying
-"""""""
-Copyright (C) 2005-2019 Matt Mackall.
-Free use of this software is granted under the terms of the GNU General
-Public License version 2 or any later version.
-
-.. include:: common.txt
--- a/mercurial/help/hgignore.5.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-==========
- hgignore
-==========
-
----------------------------------
-syntax for Mercurial ignore files
----------------------------------
-
-:Author:         Vadim Gelfer <vadim.gelfer@gmail.com>
-:Organization:   Mercurial
-:Manual section: 5
-:Manual group:   Mercurial Manual
-
-.. include:: hgignore.5.gendoc.txt
-
-Author
-======
-Vadim Gelfer <vadim.gelfer@gmail.com>
-
-Mercurial was written by Matt Mackall <mpm@selenic.com>.
-
-See Also
-========
-|hg(1)|_, |hgrc(5)|_
-
-Copying
-=======
-This manual page is copyright 2006 Vadim Gelfer.
-Mercurial is copyright 2005-2019 Matt Mackall.
-Free use of this software is granted under the terms of the GNU General
-Public License version 2 or any later version.
-
-.. include:: common.txt
-
--- a/mercurial/help/hgignore.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-Synopsis
-========
-
-The Mercurial system uses a file called ``.hgignore`` in the root
-directory of a repository to control its behavior when it searches
-for files that it is not currently tracking.
-
-Description
-===========
-
-The working directory of a Mercurial repository will often contain
-files that should not be tracked by Mercurial. These include backup
-files created by editors and build products created by compilers.
-These files can be ignored by listing them in a ``.hgignore`` file in
-the root of the working directory. The ``.hgignore`` file must be
-created manually. It is typically put under version control, so that
-the settings will propagate to other repositories with push and pull.
-
-An untracked file is ignored if its path relative to the repository
-root directory, or any prefix path of that path, is matched against
-any pattern in ``.hgignore``.
-
-For example, say we have an untracked file, ``file.c``, at
-``a/b/file.c`` inside our repository. Mercurial will ignore ``file.c``
-if any pattern in ``.hgignore`` matches ``a/b/file.c``, ``a/b`` or ``a``.
-
-In addition, a Mercurial configuration file can reference a set of
-per-user or global ignore files. See the ``ignore`` configuration
-key on the ``[ui]`` section of :hg:`help config` for details of how to
-configure these files.
-
-To control Mercurial's handling of files that it manages, many
-commands support the ``-I`` and ``-X`` options; see
-:hg:`help <command>` and :hg:`help patterns` for details.
-
-Files that are already tracked are not affected by ``.hgignore``, even
-if they appear in ``.hgignore``. An untracked file ``X`` can be
-explicitly added with :hg:`add X`, even if ``X`` would be excluded by a
-pattern in ``.hgignore``.
-
-Syntax
-======
-
-An ignore file is a plain text file consisting of a list of patterns,
-with one pattern per line. Empty lines are skipped. The ``#``
-character is treated as a comment character, and the ``\`` character
-is treated as an escape character.
-
-Mercurial supports several pattern syntaxes. The default syntax used
-is Python/Perl-style regular expressions.
-
-To change the syntax used, use a line of the following form::
-
-  syntax: NAME
-
-where ``NAME`` is one of the following:
-
-``regexp``
-  Regular expression, Python/Perl syntax.
-``glob``
-  Shell-style glob.
-``rootglob``
-  A variant of ``glob`` that is rooted (see below).
-
-The chosen syntax stays in effect when parsing all patterns that
-follow, until another syntax is selected.
-
-Neither ``glob`` nor regexp patterns are rooted. A glob-syntax
-pattern of the form ``*.c`` will match a file ending in ``.c`` in any
-directory, and a regexp pattern of the form ``\.c$`` will do the
-same. To root a regexp pattern, start it with ``^``. To get the same
-effect with glob-syntax, you have to use ``rootglob``.
-
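-For instance, this rooted pattern ignores ``file.c`` at the repository
-root but leaves a deeper ``a/b/file.c`` alone::
-
-  syntax: rootglob
-  # rooted: matches only at the top of the working directory
-  *.c
-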
-Subdirectories can have their own .hgignore settings by adding
-``subinclude:path/to/subdir/.hgignore`` to the root ``.hgignore``. See
-:hg:`help patterns` for details on ``subinclude:`` and ``include:``.
-
-.. note::
-
-  Patterns specified in files other than ``.hgignore`` are always rooted.
-  Please see :hg:`help patterns` for details.
-
-Example
-=======
-
-Here is an example ignore file. ::
-
-  # use glob syntax.
-  syntax: glob
-
-  *.elc
-  *.pyc
-  *~
-
-  # switch to regexp syntax.
-  syntax: regexp
-  ^\.pc/
--- a/mercurial/help/hgrc.5.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-======
- hgrc
-======
-
----------------------------------
-configuration files for Mercurial
----------------------------------
-
-:Author:         Bryan O'Sullivan <bos@serpentine.com>
-:Organization:   Mercurial
-:Manual section: 5
-:Manual group:   Mercurial Manual
-
-.. contents::
-   :backlinks: top
-   :class: htmlonly
-
-
-Description
-===========
-
-.. include:: hgrc.5.gendoc.txt
-
-Author
-======
-Bryan O'Sullivan <bos@serpentine.com>.
-
-Mercurial was written by Matt Mackall <mpm@selenic.com>.
-
-See Also
-========
-|hg(1)|_, |hgignore(5)|_
-
-Copying
-=======
-This manual page is copyright 2005 Bryan O'Sullivan.
-Mercurial is copyright 2005-2019 Matt Mackall.
-Free use of this software is granted under the terms of the GNU General
-Public License version 2 or any later version.
-
-.. include:: common.txt
--- a/mercurial/help/hgweb.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-Mercurial's internal web server, hgweb, can serve either a single
-repository, or a tree of repositories. In the second case, repository
-paths and global options can be defined using a dedicated
-configuration file common to :hg:`serve`, ``hgweb.wsgi``,
-``hgweb.cgi`` and ``hgweb.fcgi``.
-
-This file uses the same syntax as other Mercurial configuration files
-but recognizes only the following sections:
-
-  - web
-  - paths
-  - collections
-
-The ``web`` options are thoroughly described in :hg:`help config`.
-
-The ``paths`` section maps URL paths to paths of repositories in the
-filesystem. hgweb will not expose the filesystem directly - only
-Mercurial repositories can be published and only according to the
-configuration.
-
-The left hand side is the path in the URL. Note that hgweb reserves
-subpaths like ``rev`` or ``file``; try using different names for
-nested repositories to avoid confusing effects.
-
-The right hand side is the path in the filesystem. If the specified
-path ends with ``*`` or ``**`` the filesystem will be searched
-recursively for repositories below that point.
-With ``*`` it will not recurse into the repositories it finds (except for
-``.hg/patches``).
-With ``**`` it will also search inside repository working directories
-and possibly find subrepositories.
-
-In this example::
-
-  [paths]
-  /projects/a = /srv/tmprepos/a
-  /projects/b = c:/repos/b
-  / = /srv/repos/*
-  /user/bob = /home/bob/repos/**
-
-- The first two entries make two repositories in different directories
-  appear under the same directory in the web interface
-- The third entry will publish every Mercurial repository found in
-  ``/srv/repos/``, for instance the repository ``/srv/repos/quux/``
-  will appear as ``http://server/quux/``
-- The fourth entry will publish both ``http://server/user/bob/quux/``
-  and ``http://server/user/bob/quux/testsubrepo/``
-
-The ``collections`` section is deprecated and has been superseded by
-``paths``.
-
-URLs and Common Arguments
-=========================
-
-URLs under each repository have the form ``/{command}[/{arguments}]``
-where ``{command}`` represents the name of a command or handler and
-``{arguments}`` represents any number of additional URL parameters
-to that command.
-
-The web server has a default style associated with it. Styles map to
-a collection of named templates. Each template is used to render a
-specific piece of data, such as a changeset or diff.
-
-The style for the current request can be overridden in two ways. First,
-if ``{command}`` contains a hyphen (``-``), the text before the hyphen
-defines the style. For example, ``/atom-log`` will render the ``log``
-command handler with the ``atom`` style. The second way to set the
-style is with the ``style`` query string argument. For example,
-``/log?style=atom``. The hyphenated URL parameter is preferred.
-
-Not all templates are available for all styles. Attempting to use
-a style that doesn't have all templates defined may result in an error
-rendering the page.
-
-Many commands take a ``{revision}`` URL parameter. This defines the
-changeset to operate on. This is commonly specified as the short,
-12 digit hexadecimal abbreviation for the full 40 character unique
-revision identifier. However, any value described by
-:hg:`help revisions` typically works.
-
-Commands and URLs
-=================
-
-The following web commands and their URLs are available:
-
-  .. webcommandsmarker
--- a/mercurial/help/internals/bundle2.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,677 +0,0 @@
-Bundle2 refers to a data format that is used for both on-disk storage
-and over-the-wire transfer of repository data and state.
-
-The data format allows the capture of multiple components of
-repository data. Contrast with the initial bundle format, which
-only captured *changegroup* data (and couldn't store bookmarks,
-phases, etc).
-
-Bundle2 is used for:
-
-* Transferring data from a repository (e.g. as part of an ``hg clone``
-  or ``hg pull`` operation).
-* Transferring data to a repository (e.g. as part of an ``hg push``
-  operation).
-* Storing data on disk (e.g. the result of an ``hg bundle``
-  operation).
-* Transferring the results of a repository operation (e.g. the
-  reply to an ``hg push`` operation).
-
-At its highest level, a bundle2 payload is a stream that begins
-with some metadata and consists of a series of *parts*, with each
-part describing repository data or state or the result of an
-operation. New bundle2 parts are introduced over time when there is
-a need to capture a new form of data. A *capabilities* mechanism
-exists to allow peers to understand which bundle2 parts the other
-understands.
-
-Stream Format
-=============
-
-A bundle2 payload consists of a magic string (``HG20``) followed by
-stream level parameters, followed by any number of payload *parts*.
-
-It may help to think of the stream level parameters as *headers* and the
-payload parts as the *body*.
-
-Stream Level Parameters
------------------------
-
-Following the magic string is data that defines parameters applicable to the
-entire payload.
-
-Stream level parameters begin with a 32-bit unsigned big-endian integer.
-The value of this integer defines the number of bytes of stream level
-parameters that follow.
-
-The *N* bytes of raw data contain a space separated list of parameters.
-Each parameter consists of a required name and an optional value.
-
-Parameters have the form ``<name>`` or ``<name>=<value>``.
-
-Both the parameter name and value are URL quoted.
-
-Names MUST start with a letter. If the first letter is lower case, the
-parameter is advisory and can safely be ignored. If the first letter
-is upper case, the parameter is mandatory and the handler MUST stop if
-it is unable to process it.
-
-Stream level parameters apply to the entire bundle2 payload. Lower-level
-options should go into a bundle2 part instead.
-
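-For illustration, a minimal reader for this layout might look like the
-following sketch (plain Python, not Mercurial's internal API; error
-handling omitted)::
-
-  import struct
-  from urllib.parse import unquote
-
-  def read_stream_params(fh):
-      # 32-bit unsigned big-endian byte count, then that many bytes of
-      # space separated, URL quoted ``name`` or ``name=value`` items
-      (size,) = struct.unpack('>I', fh.read(4))
-      params = {}
-      for item in fh.read(size).split(b' '):
-          if not item:
-              continue
-          name, sep, value = item.partition(b'=')
-          params[unquote(name.decode('ascii'))] = (
-              unquote(value.decode('ascii')) if sep else None)
-      return params
-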
-The following stream level parameters are defined:
-
-Compression
-   Compression format of payload data. ``GZ`` denotes zlib. ``BZ``
-   denotes bzip2. ``ZS`` denotes zstandard.
-
-   When defined, all bytes after the stream level parameters are
-   compressed using the compression format defined by this parameter.
-
-   If this parameter isn't present, data is raw/uncompressed.
-
-   This parameter MUST be mandatory because attempting to consume
-   streams without knowing how to decode the underlying bytes will
-   result in errors.
-
-Payload Part
-------------
-
-Following the stream level parameters are 0 or more payload parts. Each
-payload part consists of a header and a body.
-
-The payload part header consists of a 32-bit unsigned big-endian integer
-defining the number of bytes in the header that follow. The special
-value ``0`` indicates the end of the bundle2 stream.
-
-The binary format of the part header is as follows:
-
-* 8-bit unsigned size of the part name
-* N-bytes alphanumeric part name
-* 32-bit unsigned big-endian part ID
-* N bytes part parameter data
-
-The *part name* identifies the type of the part. A part name with an
-UPPERCASE letter is mandatory. Otherwise, the part is advisory. A
-consumer should abort if it encounters a mandatory part it doesn't know
-how to process. See the sections below for each defined part type.
-
-The *part ID* is an identifier used to refer to a specific part. It
-should be unique within the bundle2 payload.
-
-Part parameter data consists of:
-
-* 1 byte number of mandatory parameters
-* 1 byte number of advisory parameters
-* 2 * N bytes of sizes of parameter keys and values
-* N * M blobs of parameter key and value data
-
-Following the 2 bytes of mandatory and advisory parameter counts are
-2-tuples of bytes of the sizes of each parameter. e.g.
-(<key size>, <value size>).
-
-Following that are the raw values, without padding. Mandatory parameters
-come first, followed by advisory parameters.
-
-Each parameter's key MUST be unique within the part.
-
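-As a non-normative sketch, the header layout above could be decoded
-along these lines (the helper name is illustrative, not Mercurial's
-implementation)::
-
-  import struct
-
-  def read_part_header(fh):
-      (headersize,) = struct.unpack('>I', fh.read(4))
-      if headersize == 0:
-          return None  # end of the bundle2 stream
-      data = fh.read(headersize)
-      namelen = data[0]
-      name = data[1:1 + namelen]
-      pos = 1 + namelen
-      (partid,) = struct.unpack_from('>I', data, pos)
-      pos += 4
-      mancount, advcount = data[pos], data[pos + 1]
-      pos += 2
-      # 2-tuples of (key size, value size), one pair per parameter
-      sizes = [(data[pos + 2 * i], data[pos + 2 * i + 1])
-               for i in range(mancount + advcount)]
-      pos += 2 * (mancount + advcount)
-      params = []
-      for keysize, valsize in sizes:  # mandatory first, then advisory
-          key = data[pos:pos + keysize]
-          value = data[pos + keysize:pos + keysize + valsize]
-          pos += keysize + valsize
-          params.append((key, value))
-      return name, partid, params
-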
-Following the part parameter data is the part payload. The part payload
-consists of a series of framed chunks. The frame header is a 32-bit
-big-endian integer defining the size of the chunk. The N bytes of raw
-payload data follows.
-
-The part payload consists of 0 or more chunks.
-
-A chunk with size ``0`` denotes the end of the part payload. Therefore,
-there will always be at least 1 32-bit integer following the payload
-part header.
-
-A chunk size of ``-1`` is used to signal an *interrupt*. If such a chunk
-size is seen, the stream processor should process the next bytes as a new
-payload part. After this payload part, processing of the original,
-interrupted part should resume.
-
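-The chunk framing can be consumed with a loop along these lines (a
-sketch; a real consumer must handle the interrupt case by processing
-the nested part and then resuming)::
-
-  import struct
-
-  def iter_part_chunks(fh):
-      while True:
-          # read the frame header as signed so ``-1`` is visible
-          (size,) = struct.unpack('>i', fh.read(4))
-          if size == 0:
-              return  # end of this part's payload
-          if size == -1:
-              raise NotImplementedError('interrupt: a nested part follows')
-          yield fh.read(size)
-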
-Capabilities
-============
-
-Bundle2 is a dynamic format that can evolve over time. For example,
-when a new repository data concept is invented, a new bundle2 part
-is typically invented to hold that data. In addition, parts performing
-similar functionality may come into existence if a better mechanism
-for performing it is devised.
-
-Because the bundle2 format evolves over time, peers need to understand
-what bundle2 features the other can understand. The *capabilities*
-mechanism is how those features are expressed.
-
-Bundle2 capabilities are logically expressed as a dictionary of
-key-value pairs where the keys are strings and the values are lists
-of strings.
-
-Capabilities are encoded for exchange between peers. The encoded
-capabilities blob consists of a newline (``\n``) delimited list of
-entries. Each entry has the form ``<key>`` or ``<key>=<value>``,
-depending on whether the capability has a value.
-
-The capability name is URL quoted (``%XX`` encoding of URL unsafe
-characters).
-
-The value, if present, is formed by URL quoting each value in
-the capability list and concatenating the result with a comma (``,``).
-
-For example, take the capabilities ``novaluekey`` (with no value) and
-``listvaluekey`` with values ``value 1`` and ``value 2``. These would
-be encoded as::
-
-   listvaluekey=value%201,value%202\nnovaluekey
-
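-A toy encoder for this scheme (assuming native string keys and values;
-not the function Mercurial itself uses) reproduces the example above::
-
-  from urllib.parse import quote
-
-  def encode_caps(caps):
-      entries = []
-      for key, values in sorted(caps.items()):
-          entry = quote(key)
-          if values:
-              # URL quote each value, then join the list with commas
-              entry += '=' + ','.join(quote(v) for v in values)
-          entries.append(entry)
-      return '\n'.join(entries)
-
-  # encode_caps({'listvaluekey': ['value 1', 'value 2'],
-  #              'novaluekey': []})
-  # -> 'listvaluekey=value%201,value%202\nnovaluekey'
-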
-The sections below detail the defined bundle2 capabilities.
-
-HG20
-----
-
-Denotes that the peer supports the bundle2 data format.
-
-bookmarks
----------
-
-Denotes that the peer supports the ``bookmarks`` part.
-
-Peers should not issue mandatory ``bookmarks`` parts unless this
-capability is present.
-
-changegroup
------------
-
-Denotes which versions of the *changegroup* format the peer can
-receive. Values include ``01``, ``02``, and ``03``.
-
-The peer should not generate changegroup data for a version not
-specified by this capability.
-
-checkheads
-----------
-
-Denotes which forms of heads checking the peer supports.
-
-If ``related`` is in the value, then the peer supports the ``check:heads``
-part and the peer is capable of detecting race conditions when applying
-changelog data.
-
-digests
--------
-
-Denotes which hashing formats the peer supports.
-
-Values are names of hashing functions. Values include ``md5``, ``sha1``,
-and ``sha512``.
-
-error
------
-
-Denotes which ``error:`` parts the peer supports.
-
-Value is a list of strings of ``error:`` part names. Valid values
-include ``abort``, ``unsupportedcontent``, ``pushraced``, and ``pushkey``.
-
-Peers should not issue an ``error:`` part unless the type of that
-part is listed as supported by this capability.
-
-listkeys
---------
-
-Denotes that the peer supports the ``listkeys`` part.
-
-hgtagsfnodes
-------------
-
-Denotes that the peer supports the ``hgtagsfnodes`` part.
-
-obsmarkers
-----------
-
-Denotes that the peer supports the ``obsmarkers`` part and which versions
-of the obsolescence data format it can receive. Values are strings like
-``V<N>``. e.g. ``V1``.
-
-phases
-------
-
-Denotes that the peer supports the ``phases`` part.
-
-pushback
---------
-
-Denotes that the peer supports sending/receiving bundle2 data in response
-to a bundle2 request.
-
-This capability is typically used by servers that employ server-side
-rewriting of pushed repository data. For example, a server may wish to
-automatically rebase pushed changesets. When this capability is present,
-the server can send a bundle2 response containing the rewritten changeset
-data and the client will apply it.
-
-pushkey
--------
-
-Denotes that the peer supports the ``pushkey`` part.
-
-remote-changegroup
-------------------
-
-Denotes that the peer supports the ``remote-changegroup`` part and
-which protocols it can use to fetch remote changegroup data.
-
-Values are protocol names. e.g. ``http`` and ``https``.
-
-stream
-------
-
-Denotes that the peer supports ``stream*`` parts in order to support
-*stream clone*.
-
-Values are which ``stream*`` parts the peer supports. ``v2`` denotes
-support for the ``stream2`` part.
-
-Bundle2 Part Types
-==================
-
-The sections below detail the various bundle2 part types.
-
-bookmarks
----------
-
-The ``bookmarks`` part holds bookmarks information.
-
-This part has no parameters.
-
-The payload consists of entries defining bookmarks. Each entry consists of:
-
-* 20 bytes binary changeset node.
-* 2 bytes big endian short defining bookmark name length.
-* N bytes defining bookmark name.
-
-Receivers typically update bookmarks to match the state specified in
-this part.
-
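-Decoding the payload is a matter of walking these variable-size
-entries, as in this sketch (illustrative only)::
-
-  import struct
-
-  def iter_bookmark_entries(payload):
-      pos = 0
-      while pos < len(payload):
-          node = payload[pos:pos + 20]  # binary changeset node
-          (namelen,) = struct.unpack_from('>H', payload, pos + 20)
-          name = payload[pos + 22:pos + 22 + namelen]
-          pos += 22 + namelen
-          yield node, name
-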
-changegroup
------------
-
-The ``changegroup`` part contains *changegroup* data (changelog, manifestlog,
-and filelog revision data).
-
-The following part parameters are defined for this part.
-
-version
-   Changegroup version string. e.g. ``01``, ``02``, and ``03``. This parameter
-   determines how to interpret the changegroup data within the part.
-
-nbchanges
-   The number of changesets in this changegroup. This parameter can be used
-   to aid in the display of progress bars, etc during part application.
-
-treemanifest
-   Whether the changegroup contains tree manifests.
-
-targetphase
-   The target phase of changesets in this part. Value is an integer of
-   the target phase.
-
-The payload of this part is raw changegroup data. See
-:hg:`help internals.changegroups` for the format of changegroup data.
-
-check:bookmarks
----------------
-
-The ``check:bookmarks`` part is inserted into a bundle as a means for the
-receiver to validate that the sender's known state of bookmarks matches
-the receiver's.
-
-This part has no parameters.
-
-The payload is a binary stream of bookmark data. Each entry in the stream
-consists of:
-
-* 20 bytes binary node that bookmark is associated with
-* 2 bytes unsigned short defining length of bookmark name
-* N bytes containing the bookmark name
-
-If all bits in the node value are ``1``, then this signifies a missing
-bookmark.
-
-When the receiver encounters this part, for each bookmark in the part
-payload, it should validate that the current bookmark state matches
-the specified state. If it doesn't, then the receiver should take
-appropriate action. (In the case of pushes, this mismatch signifies
-a race condition and the receiver should consider rejecting the push.)
-
-check:heads
------------
-
-The ``check:heads`` part is a means to validate that the sender's state
-of DAG heads matches the receiver's.
-
-This part has no parameters.
-
-The body of this part is an array of 20 byte binary nodes representing
-changeset heads.
-
-Receivers should compare the set of heads defined in this part to the
-current set of repo heads and take action if there is a mismatch in that
-set.
-
-Note that this part applies to *all* heads in the repo.
-
-check:phases
-------------
-
-The ``check:phases`` part validates that the sender's state of phase
-boundaries matches the receiver's.
-
-This part has no parameters.
-
-The payload consists of an array of 24 byte entries. Each entry is
-a big endian 32-bit integer defining the phase integer and 20 byte
-binary node value.
-
-For each changeset defined in this part, the receiver should validate
-that its current phase matches the phase defined in this part. The
-receiver should take appropriate action if a mismatch occurs.
-
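-This 24 byte entry layout is shared with the ``phase-heads`` part
-described below, so one helper (a sketch, not Mercurial's code) can
-decode both::
-
-  import struct
-
-  def iter_phase_entries(payload):
-      for pos in range(0, len(payload), 24):
-          # big endian 32-bit phase integer, then 20 byte binary node
-          (phase,) = struct.unpack_from('>I', payload, pos)
-          yield phase, payload[pos + 4:pos + 24]
-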
-check:updated-heads
--------------------
-
-The ``check:updated-heads`` part validates that the sender's state of
-DAG heads updated by this bundle matches the receiver's.
-
-This type is nearly identical to ``check:heads`` except the heads
-in the payload are only a subset of heads in the repository. The
-receiver should validate that all nodes specified by the sender are
-branch heads and take appropriate action if not.
-
-error:abort
------------
-
-The ``error:abort`` part conveys a fatal error.
-
-The following part parameters are defined:
-
-message
-   The string content of the error message.
-
-hint
-   Supplemental string giving a hint on how to fix the problem.
-
-error:pushkey
--------------
-
-The ``error:pushkey`` part conveys an error in the *pushkey* protocol.
-
-The following part parameters are defined:
-
-namespace
-   The pushkey domain that exhibited the error.
-
-key
-   The key whose update failed.
-
-new
-   The value we tried to set the key to.
-
-old
-   The old value of the key (as supplied by the client).
-
-ret
-   The integer result code for the pushkey request.
-
-in-reply-to
-   Part ID that triggered this error.
-
-This part is generated if there was an error applying *pushkey* data.
-Pushkey data includes bookmarks, phases, and obsolescence markers.
-
-error:pushraced
----------------
-
-The ``error:pushraced`` part conveys that an error occurred and
-the likely cause is losing a race with another pusher.
-
-The following part parameters are defined:
-
-message
-   String error message.
-
-This part is typically emitted when a receiver examining ``check:*``
-parts encounters an inconsistency between incoming state and local state.
-The likely cause of that inconsistency is another repository change
-operation (often another client performing an ``hg push``).
-
-error:unsupportedcontent
-------------------------
-
-The ``error:unsupportedcontent`` part conveys that a bundle2 receiver
-encountered a part or content it was not able to handle.
-
-The following part parameters are defined:
-
-parttype
-   The name of the part that triggered this error.
-
-params
-   ``\0`` delimited list of parameters.
-
-hgtagsfnodes
-------------
-
-The ``hgtagsfnodes`` type defines file nodes for the ``.hgtags`` file
-for various changesets.
-
-This part has no parameters.
-
-The payload is an array of pairs of 20 byte binary nodes. The first node
-is a changeset node. The second node is the ``.hgtags`` file node.
-
-Resolving tags requires resolving the ``.hgtags`` file node for changesets.
-On large repositories, this can be expensive. Repositories cache the
-mapping of changeset to ``.hgtags`` file node on disk as a performance
-optimization. This part allows that cached data to be transferred alongside
-changeset data.
-
-Receivers should update their ``.hgtags`` cache file node mappings with
-the incoming data.
-
-listkeys
---------
-
-The ``listkeys`` part holds content for a *pushkey* namespace.
-
-The following part parameters are defined:
-
-namespace
-   The pushkey domain this data belongs to.
-
-The part payload contains a newline (``\n``) delimited list of
-tab (``\t``) delimited key-value pairs defining entries in this pushkey
-namespace.
-
-obsmarkers
-----------
-
-The ``obsmarkers`` part defines obsolescence markers.
-
-This part has no parameters.
-
-The payload consists of obsolescence markers using the on-disk markers
-format. The first byte defines the version format.
-
-The receiver should apply the obsolescence markers defined in this
-part. A ``reply:obsmarkers`` part should be sent to the sender, if possible.
-
-output
-------
-
-The ``output`` part is used to display output on the receiver.
-
-This part has no parameters.
-
-The payload consists of raw data to be printed on the receiver.
-
-phase-heads
------------
-
-The ``phase-heads`` part defines phase boundaries.
-
-This part has no parameters.
-
-The payload consists of an array of 24 byte entries. Each entry is
-a big endian 32-bit integer defining the phase integer and 20 byte
-binary node value.
-
-pushkey
--------
-
-The ``pushkey`` part communicates an intent to perform a ``pushkey``
-request.
-
-The following part parameters are defined:
-
-namespace
-   The pushkey domain to operate on.
-
-key
-   The key within the pushkey namespace that is being changed.
-
-old
-   The old value for the key being changed.
-
-new
-   The new value for the key being changed.
-
-This part has no payload.
-
-The receiver should perform a pushkey operation as described by this
-part's parameters.
-
-If the pushkey operation fails, a ``reply:pushkey`` part should be sent
-back to the sender, if possible. The ``in-reply-to`` part parameter
-should reference the source part.
-
-pushvars
---------
-
-The ``pushvars`` part defines environment variables that should be
-set when processing this bundle2 payload.
-
-The part's advisory parameters define environment variables.
-
-There is no part payload.
-
-When received, part parameters are prefixed with ``USERVAR_`` and the
-resulting variables are defined in the hooks context for the current
-bundle2 application. This part provides a mechanism for senders to
-inject extra state into the hook execution environment on the receiver.
-
-remote-changegroup
-------------------
-
-The ``remote-changegroup`` part defines an external location of a bundle
-to apply. This part can be used by servers to serve pre-generated bundles
-hosted at arbitrary URLs.
-
-The following part parameters are defined:
-
-url
-   The URL of the remote bundle.
-
-size
-   The size in bytes of the remote bundle.
-
-digests
-   A space separated list of the digest types provided in additional
-   part parameters.
-
-digest:<type>
-   The hexadecimal representation of the digest (hash) of the remote bundle.
-
-There is no payload for this part type.
-
-When encountered, clients should attempt to fetch the URL being advertised
-and read and apply it as a bundle.
-
-The ``size`` and ``digest:<type>`` parameters should be used to validate
-that the downloaded bundle matches what was advertised. If a mismatch occurs,
-the client should abort.
-
-reply:changegroup
------------------
-
-The ``reply:changegroup`` part conveys the results of application of a
-``changegroup`` part.
-
-The following part parameters are defined:
-
-return
-   Integer return code from changegroup application.
-
-in-reply-to
-   Part ID of part this reply is in response to.
-
-reply:obsmarkers
-----------------
-
-The ``reply:obsmarkers`` part conveys the results of applying an
-``obsmarkers`` part.
-
-The following part parameters are defined:
-
-new
-   The integer number of new markers that were applied.
-
-in-reply-to
-   The part ID that this part is in reply to.
-
-reply:pushkey
--------------
-
-The ``reply:pushkey`` part conveys the result of a *pushkey* operation.
-
-The following part parameters are defined:
-
-return
-   Integer result code from pushkey operation.
-
-in-reply-to
-   Part ID that triggered this pushkey operation.
-
-This part has no payload.
-
-replycaps
----------
-
-The ``replycaps`` part notifies the receiver that a reply bundle should
-be created.
-
-This part has no parameters.
-
-The payload consists of a bundle2 capabilities blob.
-
-stream2
--------
-
-The ``stream2`` part contains *streaming clone* version 2 data.
-
-The following part parameters are defined:
-
-requirements
-   URL quoted repository requirements string. Requirements are delimited by a
-   comma (``,``).
-
-filecount
-   The total number of files being transferred in the payload.
-
-bytecount
-   The total size of file content being transferred in the payload.
-
-The payload consists of raw stream clone version 2 data.
-
-The ``filecount`` and ``bytecount`` parameters can be used for progress and
-reporting purposes. The values may not be exact.
--- a/mercurial/help/internals/bundles.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-A bundle is a container for repository data.
-
-Bundles are used as standalone files as well as the interchange format
-over the wire protocol used when two Mercurial peers communicate with
-each other.
-
-Headers
-=======
-
-Bundles produced since Mercurial 0.7 (September 2005) have a 4 byte
-header identifying the major bundle type. The header always begins with
-``HG`` and the following 2 bytes indicate the bundle type/version. Some
-bundle types have additional data after this 4 byte header.
-
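-Sniffing the type is therefore a 4 byte read, along these lines
-(sketch only)::
-
-  def bundle_type(fh):
-      magic = fh.read(4)
-      if not magic.startswith(b'HG'):
-          raise ValueError('not a Mercurial bundle: %r' % magic)
-      return magic[2:4]  # e.g. b'10', b'20', b'S1'
-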
-The following sections describe each bundle header/type.
-
-HG10
-----
-
-``HG10`` headers indicate a *changegroup bundle*. This is the original
-bundle format, so it is sometimes referred to as *bundle1*. It has been
-present since version 0.7 (released September 2005).
-
-This header is followed by 2 bytes indicating the compression algorithm
-used for data that follows. All subsequent data following this
-compression identifier is compressed according to the algorithm/method
-specified.
-
-Supported algorithms include the following.
-
-``BZ``
-   *bzip2* compression.
-
-   Bzip2 compressors emit a leading ``BZ`` header. Mercurial uses this
-   leading ``BZ`` as part of the bundle header. Therefore consumers
-   of bzip2 bundles need to *seed* the bzip2 decompressor with ``BZ`` or
-   seek the input stream back to the beginning of the algorithm component
-   of the bundle header so that decompressor input is valid. This behavior
-   is unique among supported compression algorithms (a sketch of this
-   appears after this list).
-
-   Supported since version 0.7 (released September 2005).
-
-``GZ``
-   *zlib* compression.
-
-   Supported since version 0.9.2 (released December 2006).
-
-``UN``
-   *Uncompressed* or no compression. Unmodified changegroup data follows.
-
-   Supported since version 0.9.2 (released December 2006).
-
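-The sketch referenced above, assuming ``fh`` is positioned just past
-the 4 byte ``HG10`` magic (plain Python, not Mercurial's
-implementation)::
-
-  import bz2
-  import zlib
-
-  def hg10_payload(fh):
-      alg = fh.read(2)
-      data = fh.read()
-      if alg == b'BZ':
-          # the identifier bytes double as the start of the bzip2 stream
-          return bz2.BZ2Decompressor().decompress(b'BZ' + data)
-      if alg == b'GZ':
-          return zlib.decompress(data)
-      if alg == b'UN':
-          return data
-      raise ValueError('unknown compression: %r' % alg)
-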
-3rd party extensions may implement their own compression. However, no
-authority reserves values for their compression algorithm identifiers.
-
-HG2X
-----
-
-``HG2X`` headers (where ``X`` is any value) denote a *bundle2* bundle.
-Bundle2 bundles are a container format for various kinds of repository
-data and capabilities, beyond changegroup data (which was the only data
-supported by ``HG10`` bundles).
-
-``HG20`` is currently the only defined bundle2 version.
-
-The ``HG20`` format is documented at :hg:`help internals.bundle2`.
-
-Initial ``HG20`` support was added in Mercurial 3.0 (released May
-2014). However, bundle2 bundles were hidden behind an experimental flag
-until version 3.5 (released August 2015), when they were enabled in the
-wire protocol. Various commands (including ``hg bundle``) did not
-support generating bundle2 files until Mercurial 3.6 (released November
-2015).
-
-HGS1
-----
-
-*Experimental*
-
-A ``HGS1`` header indicates a *streaming clone bundle*. This is a bundle
-that contains raw revlog data from a repository store. (Typically revlog
-data is exchanged in the form of changegroups.)
-
-The purpose of *streaming clone bundles* is to *clone* repository data
-very efficiently.
-
-The ``HGS1`` header is always followed by 2 bytes indicating a
-compression algorithm of the data that follows. Only ``UN``
-(uncompressed data) is currently allowed.
-
-``HGS1UN`` support was added as an experimental feature in version 3.6
-(released November 2015) as part of the initial offering of the *clone
-bundles* feature.
--- a/mercurial/help/internals/cbor.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-Mercurial uses Concise Binary Object Representation (CBOR)
-(RFC 7049) for various data formats.
-
-This document describes the subset of CBOR that Mercurial uses and
-gives recommendations for appropriate use of CBOR within Mercurial.
-
-Type Limitations
-================
-
-Major types 0 and 1 (unsigned integers and negative integers) MUST be
-fully supported.
-
-Major type 2 (byte strings) MUST be fully supported. However, there
-are limitations around the use of indefinite-length byte strings.
-(See below.)
-
-Major type 3 (text strings) is NOT supported.
-
-Major type 4 (arrays) MUST be supported. However, values are limited
-to the set of types described in the "Container Types" section below.
-And indefinite-length arrays are NOT supported.
-
-Major type 5 (maps) MUST be supported. However, key values are limited
-to the set of types described in the "Container Types" section below.
-And indefinite-length maps are NOT supported.
-
-Major type 6 (semantic tagging of major types) can be used with the
-following semantic tag values:
-
-258
-   Mathematical finite set. Suitable for representing Python's
-   ``set`` type.
-
-All other semantic tag values are not allowed.
-
-Major type 7 (simple data types) can be used with the following
-type values:
-
-20
-   False
-21
-   True
-22
-   Null
-31
-   Break stop code (for indefinite-length items).
-
-All other simple data type values (including every value requiring the
-1 byte extension) are disallowed.
-
-Indefinite-Length Byte Strings
-==============================
-
-Indefinite-length byte strings (major type 2) are allowed. However,
-they MUST NOT occur inside a container type (such as an array or map).
-i.e. they can only occur as the "top-most" element in a stream of
-values.
-
-Encoders and decoders SHOULD *stream* indefinite-length byte strings.
-i.e. an encoder or decoder SHOULD NOT buffer the entirety of a long
-byte string value when indefinite-length byte strings are being used
-if it can be avoided. Mercurial MAY use extremely long indefinite-length
-byte strings and buffering the source or destination value COULD lead to
-memory exhaustion.
-
-Chunks in an indefinite-length byte string SHOULD NOT exceed 2^20
-bytes.
-
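-For reference, a hand-rolled encoder for small chunks might look like
-this (a sketch; real encoders should use a CBOR library and respect
-the chunk size guidance above)::
-
-  def encode_indefinite_bytestring(chunks):
-      out = bytearray([0x5F])  # major type 2, indefinite length
-      for chunk in chunks:
-          assert len(chunk) < 24  # keeps each chunk header to one byte
-          out.append(0x40 | len(chunk))  # definite-length chunk header
-          out += chunk
-      out.append(0xFF)  # break stop code (simple type 31)
-      return bytes(out)
-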
-Container Types
-===============
-
-Mercurial may use the array (major type 4), map (major type 5), and
-set (semantic tag 258 plus major type 4 array) container types.
-
-An array may contain any supported type as values.
-
-A map MUST only use the following types as keys:
-
-* unsigned integers (major type 0)
-* negative integers (major type 1)
-* byte strings (major type 2) (but not indefinite-length byte strings)
-* false (simple type 20)
-* true (simple type 21)
-* null (simple type 22)
-
-A map MUST only use the following types as values:
-
-* all types supported as map keys
-* arrays
-* maps
-* sets
-
-A set may only use the following types as values:
-
-* all types supported as map keys
-
-It is recommended that keys in maps and values in sets and arrays all
-be of a uniform type.
-
-Avoiding Large Byte Strings
-===========================
-
-The use of large byte strings is discouraged, especially in scenarios where
-the total size of the byte string may be unbounded for some inputs (e.g. when
-representing the content of a tracked file). It is highly recommended to use
-indefinite-length byte strings for these purposes.
-
-Since indefinite-length byte strings cannot be nested within an outer
-container (such as an array or map), to associate a large byte string
-with another data structure, it is recommended to use an array or
-map followed immediately by an indefinite-length byte string. For example,
-instead of the following map::
-
-   {
-      "key1": "value1",
-      "key2": "value2",
-      "long_value": "some very large value...",
-   }
-
-Use a map followed by a byte string::
-
-   {
-      "key1": "value1",
-      "key2": "value2",
-      "value_follows": True,
-   }
-   <BEGIN INDEFINITE-LENGTH BYTE STRING>
-   "some very large value"
-   "..."
-   <END INDEFINITE-LENGTH BYTE STRING>
--- a/mercurial/help/internals/censor.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-The censor system allows retroactively removing content from
-files. Actually censoring a node requires using the censor extension,
-but the functionality for handling censored nodes is partially in core.
-
-Censored nodes in a filelog have the flag ``REVIDX_ISCENSORED`` set,
-and the contents of the censored node are replaced with a censor
-tombstone. For historical reasons, the tombstone is packed in the
-filelog metadata field ``censored``. This allows censored nodes to be
-(mostly) safely transmitted through old formats like changegroup
-versions 1 and 2. When using changegroup formats older than 3, the
-receiver is required to re-add the ``REVIDX_ISCENSORED`` flag when
-storing the revision. This depends on the ``censored`` metadata key
-never being used for anything other than censoring revisions, which is
-true as of January 2017. Note that the revlog flag is the
-authoritative marker of a censored node: the tombstone should only be
-consulted when looking for a reason a node was censored or when revlog
-flags are unavailable as mentioned above.
-
-The tombstone data is a free-form string. It's expected that users of
-censor will want to record the reason for censoring a node in the
-tombstone. The tombstone must be able to fit within the size of the
-content being censored.
--- a/mercurial/help/internals/changegroups.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,207 +0,0 @@
-Changegroups are representations of repository revlog data, specifically
-the changelog data, root/flat manifest data, treemanifest data, and
-filelogs.
-
-There are 3 versions of changegroups: ``1``, ``2``, and ``3``. From a
-high-level, versions ``1`` and ``2`` are almost exactly the same, with the
-only difference being an additional item in the *delta header*. Version
-``3`` adds support for storage flags in the *delta header* and optionally
-exchanging treemanifests (enabled by setting an option on the
-``changegroup`` part in the bundle2).
-
-Changegroups when not exchanging treemanifests consist of 3 logical
-segments::
-
-   +---------------------------------+
-   |           |          |          |
-   | changeset | manifest | filelogs |
-   |           |          |          |
-   |           |          |          |
-   +---------------------------------+
-
-When exchanging treemanifests, there are 4 logical segments::
-
-   +-------------------------------------------------+
-   |           |          |               |          |
-   | changeset |   root   | treemanifests | filelogs |
-   |           | manifest |               |          |
-   |           |          |               |          |
-   +-------------------------------------------------+
-
-The principal building block of each segment is a *chunk*. A *chunk*
-is a framed piece of data::
-
-   +---------------------------------------+
-   |           |                           |
-   |  length   |           data            |
-   | (4 bytes) |   (<length - 4> bytes)    |
-   |           |                           |
-   +---------------------------------------+
-
-All integers are big-endian signed integers. Each chunk starts with a 32-bit
-integer indicating the length of the entire chunk (including the length field
-itself).
-
-There is a special case chunk that has a value of 0 for the length
-(``0x00000000``). We call this an *empty chunk*.
-
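-Reading one segment is then a loop like the following sketch (note
-that the length field counts itself)::
-
-  import struct
-
-  def iter_chunks(fh):
-      while True:
-          (length,) = struct.unpack('>l', fh.read(4))
-          if length == 0:
-              return  # the *empty chunk* ends this series
-          yield fh.read(length - 4)
-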
-Delta Groups
-============
-
-A *delta group* expresses the content of a revlog as a series of deltas,
-or patches against previous revisions.
-
-Delta groups consist of 0 or more *chunks* followed by the *empty chunk*
-to signal the end of the delta group::
-
-  +------------------------------------------------------------------------+
-  |                |             |               |             |           |
-  | chunk0 length  | chunk0 data | chunk1 length | chunk1 data |    0x0    |
-  |   (4 bytes)    |  (various)  |   (4 bytes)   |  (various)  | (4 bytes) |
-  |                |             |               |             |           |
-  +------------------------------------------------------------------------+
-
-Each *chunk*'s data consists of the following::
-
-  +---------------------------------------+
-  |                        |              |
-  |     delta header       |  delta data  |
-  |  (various by version)  |  (various)   |
-  |                        |              |
-  +---------------------------------------+
-
-The *delta data* is a series of *delta*s that describe a diff from an existing
-entry (either that the recipient already has, or previously specified in the
-bundle/changegroup).
-
-The *delta header* is different between versions ``1``, ``2``, and
-``3`` of the changegroup format.
-
-Version 1 (headerlen=80)::
-
-   +------------------------------------------------------+
-   |            |             |             |             |
-   |    node    |   p1 node   |   p2 node   |  link node  |
-   | (20 bytes) |  (20 bytes) |  (20 bytes) |  (20 bytes) |
-   |            |             |             |             |
-   +------------------------------------------------------+
-
-Version 2 (headerlen=100)::
-
-   +------------------------------------------------------------------+
-   |            |             |             |            |            |
-   |    node    |   p1 node   |   p2 node   | base node  | link node  |
-   | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) |
-   |            |             |             |            |            |
-   +------------------------------------------------------------------+
-
-Version 3 (headerlen=102)::
-
-   +------------------------------------------------------------------------------+
-   |            |             |             |            |            |           |
-   |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |
-   | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) |
-   |            |             |             |            |            |           |
-   +------------------------------------------------------------------------------+
-
-The *delta data* consists of ``chunklen - 4 - headerlen`` bytes, which contain a
-series of *delta*s, densely packed (no separators). These deltas describe a diff
-from an existing entry (either that the recipient already has, or previously
-specified in the bundle/changegroup). The format is described more fully in
-``hg help internals.bdiff``, but briefly::
-
-   +---------------------------------------------------------------+
-   |              |            |            |                      |
-   | start offset | end offset | new length |        content       |
-   |  (4 bytes)   |  (4 bytes) |  (4 bytes) | (<new length> bytes) |
-   |              |            |            |                      |
-   +---------------------------------------------------------------+
-
-Please note that the length field in the delta data does *not* include itself.
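-
-To make the hunk layout concrete, here is a minimal Python sketch (the
-helper is ours, not Mercurial's API) that applies such a series of deltas
-to a base text::
-
-    import struct
-
-    def applydeltas(base, delta):
-        out, pos, i = [], 0, 0
-        while i < len(delta):
-            start, end, newlen = struct.unpack('>iii', delta[i:i + 12])
-            out.append(base[pos:start])                # unchanged region
-            out.append(delta[i + 12:i + 12 + newlen])  # replacement bytes
-            pos = end
-            i += 12 + newlen
-        out.append(base[pos:])
-        return b''.join(out)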
-
-In version 1, the delta is always applied against the previous node from
-the changegroup or the first parent if this is the first entry in the
-changegroup.
-
-In version 2 and up, the delta base node is encoded in the entry in the
-changegroup. This allows the delta to be expressed against any parent,
-which can result in smaller deltas and more efficient encoding of data.
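-
-As a sketch, the version 3 *delta header* could be split off a chunk with
-``struct`` (the helper is ours, not Mercurial's API)::
-
-    import struct
-
-    HEADER_V3 = struct.Struct('>20s20s20s20s20sH')  # 102 bytes
-
-    def parsedeltaheader(chunk):
-        # return the header fields plus the trailing delta data
-        node, p1, p2, base, link, flags = HEADER_V3.unpack(
-            chunk[:HEADER_V3.size])
-        return node, p1, p2, base, link, flags, chunk[HEADER_V3.size:]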
-
-The *flags* field holds bitwise flags affecting the processing of revision
-data. The following flags are defined:
-
-32768
-   Censored revision. The revision's fulltext has been replaced by censor
-   metadata. May only occur on file revisions.
-16384
-   Ellipsis revision. Revision hash does not match data (likely due to rewritten
-   parents).
-8192
-   Externally stored. The revision fulltext contains ``key:value`` ``\n``
-   delimited metadata defining an object stored elsewhere. Used by the LFS
-   extension.
-
-For historical reasons, the integer values are identical to revlog version 1
-per-revision storage flags and correspond to bits being set in this 2-byte
-field. Bits were allocated starting from the most-significant bit, hence the
-reverse ordering and allocation of these flags.
-
-Changeset Segment
-=================
-
-The *changeset segment* consists of a single *delta group* holding
-changelog data. The *empty chunk* at the end of the *delta group* denotes
-the boundary to the *manifest segment*.
-
-Manifest Segment
-================
-
-The *manifest segment* consists of a single *delta group* holding manifest
-data. If treemanifests are in use, it contains only the manifest for the
-root directory of the repository. Otherwise, it contains the entire
-manifest data. The *empty chunk* at the end of the *delta group* denotes
-the boundary to the next segment (either the *treemanifests segment* or the
-*filelogs segment*, depending on version and the request options).
-
-Treemanifests Segment
----------------------
-
-The *treemanifests segment* only exists in changegroup version ``3``, and
-only if the 'treemanifest' param is part of the bundle2 changegroup part
-(it is not possible to use changegroup version 3 outside of bundle2).
-Aside from the filenames in the *treemanifests segment* containing a
-trailing ``/`` character, it behaves identically to the *filelogs segment*
-(see below). The final sub-segment is followed by an *empty chunk* (logically,
-a sub-segment with filename size 0). This denotes the boundary to the
-*filelogs segment*.
-
-Filelogs Segment
-================
-
-The *filelogs segment* consists of multiple sub-segments, each
-corresponding to an individual file whose data is being described::
-
-   +--------------------------------------------------+
-   |          |          |          |     |           |
-   | filelog0 | filelog1 | filelog2 | ... |    0x0    |
-   |          |          |          |     | (4 bytes) |
-   |          |          |          |     |           |
-   +--------------------------------------------------+
-
-The final filelog sub-segment is followed by an *empty chunk* (logically,
-a sub-segment with filename size 0). This denotes the end of the segment
-and of the overall changegroup.
-
-Each filelog sub-segment consists of the following::
-
-   +------------------------------------------------------+
-   |                 |                      |             |
-   | filename length |       filename       | delta group |
-   |    (4 bytes)    | (<length - 4> bytes) |  (various)  |
-   |                 |                      |             |
-   +------------------------------------------------------+
-
-That is, a *chunk* consisting of the filename (not terminated or padded)
-followed by N chunks constituting the *delta group* for this file. The
-*empty chunk* at the end of each *delta group* denotes the boundary to the
-next filelog sub-segment.
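-
-As an illustration, a minimal Python sketch (helper names are ours, not
-Mercurial's API) that walks the *filelogs segment* could look like::
-
-    import struct
-
-    def readfilelogs(fh):
-        # yield (filename, [delta chunks]) pairs until the terminating
-        # empty chunk of the overall segment
-        while True:
-            (length,) = struct.unpack('>i', fh.read(4))
-            if length == 0:                 # end of the filelogs segment
-                return
-            filename = fh.read(length - 4)  # chunk data is the bare filename
-            deltas = []                     # this file's delta group
-            while True:
-                (chunklen,) = struct.unpack('>i', fh.read(4))
-                if chunklen == 0:           # empty chunk ends the group
-                    break
-                deltas.append(fh.read(chunklen - 4))
-            yield filename, deltas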
--- a/mercurial/help/internals/config.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,109 +0,0 @@
-All config options used within Mercurial should be registered.
-
-Config Option in Core
-=====================
-
-Config options used by Mercurial core are registered in the
-``mercurial.configitems`` module.
-
-Simple entry
-------------
-
-A registration entry typically looks like::
-
-    coreconfigitem('section', 'option',
-        default=MyDefaultValue,
-    )
-
-Once registered, Mercurial will know that ``section.option`` is a legitimate
-config option and that ``MyDefaultValue`` should be used if no other values are
-defined in configuration files.
-
-Complex default value
----------------------
-
-If the default provided is a callable, it is called to retrieve the default
-value when accessing the config option. This is useful for default values that
-are mutable like the empty list::
-
-    coreconfigitem('pager', 'ignore',
-        default=list,
-    )
-
-In addition, there are cases where the default is not fixed, but computed from
-other properties. In this case, use the ``dynamicdefault`` object as the value
-for the ``default`` parameter. A default value is then explicitly required when
-reading the option::
-
-    # registration
-    coreconfigitem('web', 'name',
-        default=dynamicdefault,
-    )
-
-    # usage
-    ui.config('web', 'name', dirname)
-
-Free form options
------------------
-
-Some config sections use free form options (e.g. ``paths``). You can register
-them using the ``generic`` parameter::
-
-    coreconfigitem('paths', '.*',
-        default=None,
-        generic=True,
-    )
-
-When ``generic=True`` is set, the option name is matched as a regular expression
-(rooted to string start). It can be used to select specific sub-parameters::
-
-    coreconfigitem('merge-tools', br'.*\.args$',
-        default="$local $base $other",
-        generic=True,
-        priority=-1,
-    )
-
-The ``priority`` parameter controls the order used to match the generic pattern
-(lower first).
-
-Config Option in Extensions
-===========================
-
-General case
-------------
-
-Extensions should register config items through the ``registrar`` API (also used
-for commands and others)::
-
-    configtable = {}
-    configitem = registrar.configitem(configtable)
-
-    configitem('blackbox', 'dirty',
-        default=False,
-    )
-
-The ``dynamicdefault`` object is then available as
-``configitem.dynamicdefault``.
-
-Supporting older versions
--------------------------
-
-The registrar was introduced in Mercurial 4.3, and the ``generic`` parameter was
-introduced in 4.4. Starting with Mercurial 4.4, all core options were registered
-and developer warnings are emitted when accessing unregistered options.
-
-Extensions supporting versions older than Mercurial 4.3 cannot rely on the
-default value being registered. The simplest way to register an option while
-still supporting an older version is to use ``dynamicdefault`` for options
-requiring a default value. The existing code passing an explicit default can
-then stay in use until compatibility with Mercurial 4.2 is dropped.
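-
-A hedged sketch of such a registration (``myext`` and the option name are
-placeholders)::
-
-    configtable = {}
-    try:
-        from mercurial import registrar
-        configitem = registrar.configitem(configtable)
-        configitem('myext', 'option',
-            default=configitem.dynamicdefault,
-        )
-    except (AttributeError, ImportError):
-        # hg < 4.3: no registrar.configitem; the option stays unregistered
-        pass
-
-    # At read time, always pass the default explicitly so old and new
-    # versions agree:
-    #     ui.config('myext', 'option', 'mydefault')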
-
-As a reminder, here are the default values for each config type:
-
-    - config:      None
-    - configbool:  False
-    - configbytes: 0
-    - configdate:  None
-    - configint:   None
-    - configlist:  []
-    - configpath:  None
--- a/mercurial/help/internals/extensions.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,367 +0,0 @@
-Extensions allow the creation of new features and their use directly from
-the main hg command line as if they were built-in commands. The extensions
-have full access to the *internal* API.
-
-Use of Mercurial's internal API very likely makes your code subject to
-Mercurial's license. Before going any further, read the License page.
-
-There are NO guarantees that third-party code calling into Mercurial's
-internals won't break from release to release. If you do use Mercurial's API
-for published third-party code, we expect you to test your code before each
-major Mercurial release. This will prevent various bug reports from your users
-when they upgrade their copy of Mercurial.
-
-File Layout
-===========
-
-Extensions are usually written as simple python modules. Larger ones are
-better split into multiple modules of a single package (see the convert
-extension). The package root module gives its name to the extension and
-implements the ``cmdtable`` and optional callbacks described below.
-
-Command table
-=============
-
-To write your own extension, your python module can provide an optional dict
-named ``cmdtable`` with entries describing each command. A command should be
-registered in the ``cmdtable`` by the ``@command`` decorator.
-
-Example using ``@command`` decorator (requires Mercurial 1.9)::
-
-    from mercurial.i18n import _
-
-    cmdtable = {}
-    try:
-        from mercurial import registrar
-        command = registrar.command(cmdtable)
-    except (AttributeError, ImportError):
-        # Fallback to hg < 4.3 support
-        from mercurial import cmdutil
-        command = cmdutil.command(cmdtable)
-
-    @command('print-parents',
-        [('s', 'short', None, _('print short form')),
-         ('l', 'long', None, _('print long form'))],
-        _('[options] node'))
-    def printparents(ui, repo, node, **opts):
-        ...
-
-The cmdtable dictionary
------------------------
-
-The ``cmdtable`` dictionary uses the new command names as keys; each value
-is a tuple containing:
-
-1. the function to be called when the command is used.
-2. a list of options the command can take.
-3. a command line synopsis for the command (the function docstring is used for
-   the full help).
-
-List of options
----------------
-
-All the command flag options are documented in the mercurial/fancyopts.py
-sources.
-
-The options list is a list of tuples containing:
-
-1. the short option letter, or ``''`` if no short option is available
-   (for example, ``o`` for a ``-o`` option).
-2. the long option name (for example, ``option`` for a ``--option`` option).
-3. a default value for the option.
-4. a help string for the option (the "hg newcommand" part can be omitted;
-   only the options and parameter substring is needed).
-
-Command function signatures
----------------------------
-
-Functions that implement new commands always receive a ``ui`` and usually
-a ``repo`` parameter. The remaining parameters are taken from the command line
-items that don't start with a dash and are passed in the same order they were
-written. If no default value is given in the parameter list, they are required.
-
-If there is no repo to be associated with the command and consequently no
-``repo`` passed, then ``norepo=True`` should be passed to the ``@command``
-decorator::
-
-    @command('mycommand', [], norepo=True)
-    def mycommand(ui, **opts):
-        ...
-
-For examples of ``norepo``, see the convert extension.
-
-Command function docstrings
-===========================
-
-The docstring of your function is used as the main help text, shown by
-``hg help mycommand``. The docstring should be formatted using a simple
-subset of reStructuredText markup. The supported constructs include:
-
-Paragraphs::
-
-    This is a paragraph.
-
-    Paragraphs are separated
-    by blank lines.
-
-A verbatim block is introduced with a double colon followed by an indented
-block. The double colon is turned into a single colon on display::
-
-    Some text::
-
-      verbatim
-        text
-         !!
-
-We have field lists::
-
-    :key1: value1
-    :key2: value2
-
-Bullet lists::
-
-    - foo
-    - bar
-
-Enumerated lists::
-
-    1. foo
-    2. bar
-
-Inline markup::
-
-    ``*bold*``, ``monospace``, :hg:`command`
-
-Mark Mercurial commands with ``:hg:`` to make a nice link to the corresponding
-documentation. We'll expand the support if new constructs can be parsed
-without too much trouble.
-
-Communicating with the user
-===========================
-
-Besides the ``ui`` methods, like ``ui.write(*msg)`` or
-``ui.prompt(msg, default="y")``, an extension can add help text for each
-of its commands and the extension itself.
-
-The module docstring will be used as the help string when ``hg help
-extensionname`` is used. Similarly, the docstring belonging to the function
-that's wrapped by a command will be shown when ``hg help command`` is
-invoked.
-
-Setup Callbacks
-===============
-
-Extensions are loaded in phases. All extensions are processed in a given phase
-before the next phase begins. In the first phase, all extension modules are
-loaded and registered with Mercurial. This means that you can find all enabled
-extensions with ``extensions.find`` in the following phases.
-
-Extension setup
----------------
-
-There are two callbacks to be called when extensions are loaded, named
-``uisetup`` and ``extsetup``. ``uisetup`` is called first for each extension,
-then ``extsetup`` is called. This means ``extsetup`` can be useful in case
-one extension optionally depends on another extension.
-
-Both ``uisetup`` and ``extsetup`` receive a ui object with the local
-repository configuration::
-
-    def uisetup(ui):
-        # ...
-
-    def extsetup(ui):
-        # ...
-
-Be aware that ``uisetup`` is NOT the function to configure a ``ui`` instance.
-It's called only once per process, not per ``ui`` instance. Also, any changes
-to the ``ui`` may be discarded because the ``ui`` here has only temporarily
-loaded the local configuration. So, it's generally wrong to do ``ui.setconfig()``
-in these callbacks. Notable exceptions are setting ``pre/post-<command>`` hooks
-and extending ``ui.__class__``.
-
-In Mercurial 1.3.1 or earlier, ``extsetup`` takes no argument.
-
-Command table setup
--------------------
-
-After ``extsetup``, the ``cmdtable`` is copied into the global command table
-in Mercurial.
-
-Ui instance setup
------------------
-
-The optional ``uipopulate`` is called for each ``ui`` instance after
-configuration is loaded, where extensions can set up additional ui members,
-update configuration by ``ui.setconfig()``, and extend the class dynamically.
-
-Typically there are three ``ui`` instances involved in command execution:
-
-``req.ui`` (or ``repo.baseui``)
-    Only system and user configurations are loaded into it.
-``lui``
-    Local repository configuration is loaded as well. This will be used at
-    early dispatching stage where a repository isn't available.
-``repo.ui``
-    The fully-loaded ``ui`` used after a repository is instantiated. This
-    will be created from the ``req.ui`` per repository.
-
-In command server and hgweb, this may be called more than once for the same
-``ui`` instance.
-
-(New in Mercurial 4.9)
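-
-A minimal sketch (the option name and hook are hypothetical)::
-
-    def myposthook(ui, repo, **kwargs):
-        ui.write("update finished\n")
-
-    def uipopulate(ui):
-        # Called once per ui instance, after its configuration (including
-        # repo-scoped settings) is loaded -- unlike uisetup.
-        if ui.configbool("myext", "announce-updates"):  # hypothetical option
-            ui.setconfig("hooks", "post-update.myext", myposthook, "myext")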
-
-Repository setup
-----------------
-
-Extensions can implement an optional callback named ``reposetup``. It is
-called after the main Mercurial repository initialization, and can be used
-to setup any local state the extension might need.
-
-Like other callbacks, it receives an ``ui`` object and a ``repo`` object
-(no additional parameters, though)::
-
-    def reposetup(ui, repo):
-        # do initialization here
-
-It is important to take into account that the ``ui`` object that is received
-by the ``reposetup`` function is not the same as the one received by the
-``uisetup`` and ``extsetup`` functions. This is particularly important when
-setting up hooks as described in the following section, since not all hooks
-use the same ``ui`` object and hence different hooks must be configured in
-different setup functions.
-
-Wrapping methods on the ui and repo classes
--------------------------------------------
-
-Because extensions can be loaded *per repository*, you should avoid using
-``extensions.wrapfunction()`` on methods of the ``ui`` and ``repo`` objects.
-Instead, create a subclass of the specific class of the instance passed into
-the ``*setup()`` hook; e.g. use ``ui.__class__`` as the base class, then
-reassign your new class to ``ui.__class__`` again. Mercurial will then use
-your updated ``ui`` or ``repo`` instance only for repositories where your
-extension is enabled (or copies thereof, reusing your new class).
-
-For example::
-
-    def uisetup(ui):
-        class echologui(ui.__class__):
-            def log(self, service, *msg, **opts):
-                if msg:
-                    self.write('%s: %s\n' % (service, msg[0] % msg[1:]))
-                super(echologui, self).log(service, *msg, **opts)
-
-        ui.__class__ = echologui
-
-Configuring Hooks
-=================
-
-Some extensions must use hooks to do their work. These required hooks can
-be configured manually by the user by modifying the ``[hook]`` section of
-their hgrc, but they can also be configured automatically by calling the
-``ui.setconfig('hooks', ...)`` function in one of the setup functions
-described above.
-
-The main difference between manually modifying the hooks section in the hgrc
-and using ``ui.setconfig()`` is that when using ``ui.setconfig()`` you have
-access to the actual hook function object, which you can pass directly to
-``ui.setconfig()``, while when you use the hooks section of the hgrc file
-you must refer to the hook function by using the
-``python:modulename.functionname`` idiom (e.g. ``python:hgext.notify.hook``).
-
-For example::
-
-    # Define hooks -- note that the actual function name is irrelevant.
-    def preupdatehook(ui, repo, **kwargs):
-        ui.write("Pre-update hook triggered\n")
-
-    def updatehook(ui, repo, **kwargs):
-        ui.write("Update hook triggered\n")
-
-    def uisetup(ui):
-        # When pre-<cmd> and post-<cmd> hooks are configured by means of
-        # the ui.setconfig() function, you must use the ui object passed
-        # to uisetup or extsetup.
-        ui.setconfig("hooks", "pre-update.myextension", preupdatehook)
-
-    def reposetup(ui, repo):
-        # Repository-specific hooks can be configured here. These include
-        # the update hook.
-        ui.setconfig("hooks", "update.myextension", updatehook)
-
-Note how different hooks may need to be configured in different setup
-functions. In the example you can see that the ``update`` hook must be
-configured in the ``reposetup`` function, while the ``pre-update`` hook
-must be configured in the ``uisetup`` or ``extsetup`` functions.
-
-Marking compatible versions
-===========================
-
-Every extension should use the ``testedwith`` variable to specify Mercurial
-releases it's known to be compatible with. This helps us and users diagnose
-where problems are coming from::
-
-    testedwith = '2.0 2.0.1 2.1 2.1.1 2.1.2'
-
-Do not use the ``internal`` marker in third-party extensions; we will
-immediately drop all bug reports mentioning your extension if we catch you
-doing this.
-
-Similarly, an extension can use the ``buglink`` variable to specify how users
-should report issues with the extension.  This link will be included in the
-error message if the extension produces errors::
-
-    buglink = 'https://bitbucket.org/USER/REPO/issues'
-
-If an extension requires a minimum version of Mercurial, it can be declared
-with the ``minimumhgversion`` variable::
-
-    minimumhgversion = '4.6'
-
-Older clients will print a warning that the extension requires a new version,
-instead of attempting to load it.
-
-Wrap up: what belongs where?
-============================
-
-You will find here a list of most common tasks, based on setups from the
-extensions included in Mercurial core.
-
-uisetup
--------
-
-* Changes to ``ui.__class__``. The ``ui`` object that will be used to run
-  the command has not yet been created. Changes made here will affect ``ui``
-  objects created after this, and in particular the ``ui`` that will be passed
-  to ``runcommand``.
-* Command wraps (``extensions.wrapcommand``)
-* Changes that need to be visible to other extensions: because initialization
-  occurs in phases (all extensions run ``uisetup``, then all run ``extsetup``),
-  a change made here will be visible to other extensions during ``extsetup``.
-* Monkeypatches or function wraps (``extensions.wrapfunction``) of ``dispatch``
-  module members
-* Set up ``pre-*`` and ``post-*`` hooks. (DEPRECATED. ``uipopulate`` is
-  preferred on Mercurial 4.9 and later.)
-* ``pushkey`` setup
-
-extsetup
---------
-
-* Changes depending on the status of other extensions. (``if extensions.find('mq')``)
-* Add a global option to all commands
-* Extend revsets
-
-uipopulate
-----------
-
-* Modify ``ui`` instance attributes and configuration variables.
-* Changes to ``ui.__class__`` per instance.
-* Set up all hooks per scoped configuration.
-
-reposetup
----------
-
-* Set up all hooks but ``pre-*`` and ``post-*``. (DEPRECATED. ``uipopulate`` is
-  preferred on Mercurial 4.9 and later.)
-* Modify configuration variables
-* Changes to ``repo.__class__``, ``repo.dirstate.__class__``
--- a/mercurial/help/internals/linelog.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,302 +0,0 @@
-linelog is a storage format inspired by the "Interleaved deltas" idea. See
-https://en.wikipedia.org/wiki/Interleaved_deltas for its introduction.
-
-0. SCCS Weave
-
-  To understand what linelog is, first we have a quick look at a simplified
-  (with header removed) SCCS weave format, which is an implementation of the
-  "Interleaved deltas" idea.
-
-0.1 Basic SCCS Weave File Format
-
-  A SCCS weave file consists of plain text lines. Each line is either a
-  special instruction starting with "^A" or part of the content of the real
-  file the weave tracks. There are 3 important operations, where REV denotes
-  the revision number:
-
-    ^AI REV, marking the beginning of an insertion block introduced by REV
-    ^AD REV, marking the beginning of a deletion block introduced by REV
-    ^AE REV, marking the end of the block started by "^AI REV" or "^AD REV"
-
-  Note on revision numbers: For any two different revision numbers, one must
-  be an ancestor of the other to make them comparable. This enforces linear
-  history. In addition, the comparison functions (">=", "<") should be
-  efficient. This means that if revisions are identified by strings, as in
-  git or hg, an external map is required to convert them into integers.
-
-  For example, to represent the following changes:
-
-    REV 1 | REV 2 | REV 3
-    ------+-------+-------
-    a     | a     | a
-    b     | b     | 2
-    c     | 1     | c
-          | 2     |
-          | c     |
-
-  A possible weave file looks like:
-
-    ^AI 1
-    a
-    ^AD 3
-    b
-    ^AI 2
-    1
-    ^AE 3
-    2
-    ^AE 2
-    c
-    ^AE 1
-
-  An "^AE" does not always match its nearest operation ("^AI" or "^AD"). In
-  the above example, "^AE 3" does not match the nearest "^AI 2" but "^AD 3".
-  Therefore we need some extra information for "^AE". The SCCS weave uses a
-  revision number. It could also be a boolean value about whether it is an
-  insertion or a deletion (see section 0.4).
-
-0.2 Checkout
-
-  The "checkout" operation is to retrieve file content at a given revision,
-  say X. It's doable by going through the file line by line and:
-
-    - If meet ^AI rev, and rev > X, find the corresponding ^AE and jump there
-    - If meet ^AD rev, and rev <= X, find the corresponding ^AE and jump there
-    - Ignore ^AE
-    - For normal lines, just output them
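-
-  A minimal Python sketch of this procedure, over a pre-parsed weave where
-  each item is one of ('I', rev), ('D', rev), ('E', rev) or ('line', text)
-  (the representation is ours, not SCCS's):
-
-    def checkout(weave, x):
-        out = []
-        skiprev = None  # revision of the block being skipped, if any
-        for item in weave:
-            if skiprev is not None:
-                if item == ('E', skiprev):
-                    skiprev = None            # reached the matching ^AE
-                continue
-            kind, value = item
-            if kind == 'I' and value > x:     # not yet inserted at rev x
-                skiprev = value
-            elif kind == 'D' and value <= x:  # already deleted at rev x
-                skiprev = value
-            elif kind == 'line':
-                out.append(value)
-        return out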
-
-0.3 Annotate
-
-  The "annotate" operation is to show extra metadata like the revision number
-  and the original line number a line comes from.
-
-  It's basically just a "Checkout". The extra metadata can be stored side by
-  side with the line contents. Alternatively, we can infer the revision
-  numbers from the "^AI"s.
-
-  Some SCM tools have to calculate diffs on the fly and thus are much slower
-  on this operation.
-
-0.4 Tree Structure
-
-  The word "interleaved" is used because "^AI" .. "^AE" and "^AD" .. "^AE"
-  blocks can be interleaved.
-
-  If we consider insertions and deletions separately, they can form tree
-  structures, respectively.
-
-    +--- ^AI 1        +--- ^AD 3
-    | +- ^AI 2        | +- ^AD 2
-    | |               | |
-    | +- ^AE 2        | +- ^AE 2
-    |                 |
-    +--- ^AE 1        +--- ^AE 3
-
-  More specifically, it's possible to build a tree for all insertions, where
-  the tree node has the structure "(rev, startline, endline)". "startline" is
-  the line number of "^AI" and "endline" is the line number of the matched
-  "^AE".  The tree will have these properties:
-
-    1. child.rev > parent.rev
-    2. child.startline > parent.startline
-    3. child.endline < parent.endline
-
-  A similar tree for all deletions can also be built with the first property
-  changed to:
-
-    1. child.rev < parent.rev
-
-0.5 Malformed Cases
-
-  The following cases are considered malformed in our implementation:
-
-    1. Interleaved insertions, or interleaved deletions.
-       It can be rewritten to a non-interleaved tree structure.
-
-       Take insertions as example, deletions are similar:
-
-       ^AI x         ^AI x
-       a             a
-       ^AI x + 1  -> ^AI x + 1
-       b             b
-       ^AE x         ^AE x + 1
-       c             ^AE x
-       ^AE x + 1     ^AI x + 1
-                     c
-                     ^AE x + 1
-
-    2. Nested insertions, where the inner one has a smaller revision number.
-       Or nested deletions, where the inner one has a larger revision number.
-       It can be rewritten to a non-nested form.
-
-       Take insertions as example, deletions are similar:
-
-       ^AI x + 1     ^AI x + 1
-       a             a
-       ^AI x      -> ^AE x + 1
-       b             ^AI x
-       ^AE x         b
-       c             ^AE x
-       ^AE x + 1     ^AI x + 1
-                     c
-                     ^AE x + 1
-
-    3. Insertion inside deletion with a smaller revision number.
-
-       Rewrite by duplicating the content inserted:
-
-       ^AD x          ^AD x
-       a              a
-       ^AI x + 1  ->  b
-       b              c
-       ^AE x + 1      ^AE x
-       c              ^AI x + 1
-       ^AE x          b
-                      ^AE x + 1
-
-       Note: If "annotate" purely depends on "^AI" information, then the
-       duplication content will lose track of where "b" is originally from.
-
-  Some of them may be valid in other implementations for special purposes. For
-  example, to "revive" a previously deleted block in a newer revision.
-
-0.6 Cases Can Be Optimized
-
-  It's always better to get things nested. For example, the left is more
-  efficient than the right while they represent the same content:
-
-    +--- ^AD 2          +- ^AD 1
-    | +- ^AD 1          |   LINE A
-    | |   LINE A        +- ^AE 1
-    | +- ^AE 1          +- ^AD 2
-    |     LINE B        |   LINE B
-    +--- ^AE 2          +- ^AE 2
-
-  Our implementation sometimes generates the less efficient form. Always
-  producing the optimal form would require extra code complexity that seems
-  unwarranted.
-
-0.7 Inefficiency
-
-  The file format can be slow because:
-
-  - Inserting a new line at position P requires rewriting all data after P.
-  - Finding "^AE" requires walking through the content (O(N), where N is the
-    number of lines between "^AI/D" and "^AE").
-
-1. Linelog
-
-  The linelog is a binary format dedicated to speeding up Mercurial's (or
-  git's) "annotate" operation. It's designed to avoid the issues mentioned
-  in section 0.7.
-
-1.1 Content Stored
-
-  Linelog is not another storage for file contents. It only stores line
-  numbers and corresponding revision numbers, instead of actual line content.
-  This is okay for the "annotate" operation because the external source is
-  usually fast at checking out the content of a file at a specific revision.
-
-  A typical SCCS weave is also fast on the "grep" operation, which needs
-  random accesses to line contents from different revisions of a file. This
-  can be slow with linelog's no-line-content design. However, we could use
-  an extra map ((rev, line num) -> line content) to speed it up.
-
-  Note that the revision numbers in linelog should be independent of
-  Mercurial's integer revision numbers. There should be some mapping between
-  linelog revs and hg hashes stored side by side, to make the files reusable
-  after being copied to another machine.
-
-1.2 Basic Format
-
-  A linelog file consists of "instruction"s. An "instruction" can be either:
-
-    - JGE  REV ADDR     # jump to ADDR if rev >= REV
-    - JL   REV ADDR     # jump to ADDR if rev < REV
-    - LINE REV LINENUM  # append the (LINENUM+1)-th line in revision REV
-
-  For example, here is the example linelog representing the same file with
-  3 revisions mentioned in section 0.1:
-
-    SCCS  |    Linelog
-    Weave | Addr : Instruction
-    ------+------+-------------
-    ^AI 1 |    0 : JL   1 8
-    a     |    1 : LINE 1 0
-    ^AD 3 |    2 : JGE  3 6
-    b     |    3 : LINE 1 1
-    ^AI 2 |    4 : JL   2 7
-    1     |    5 : LINE 2 2
-    ^AE 3 |
-    2     |    6 : LINE 2 3
-    ^AE 2 |
-    c     |    7 : LINE 1 2
-    ^AE 1 |
-          |    8 : END
-
-  This way, "find ^AE" is O(1) because we just jump there. And we can insert
-  new lines without rewriting most part of the file by appending new lines and
-  changing a single instruction to jump to them.
-
-  The current implementation uses 64 bits for an instruction: The opcode (JGE,
-  JL or LINE) takes 2 bits, REV takes 30 bits and ADDR or LINENUM takes 32
-  bits. It also stores the max revision number and buffer size at the first
-  64 bits for quick access to these values.
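-
-  As a sketch, the evaluation loop over a pre-decoded program, where each
-  address holds ('JGE', rev, addr), ('JL', rev, addr), ('LINE', rev, linenum)
-  or ('END',) (decoding of the 64-bit instructions is omitted):
-
-    def annotate(program, rev):
-        out, pc = [], 0
-        while program[pc][0] != 'END':
-            op = program[pc]
-            if op[0] == 'JGE' and rev >= op[1]:
-                pc = op[2]                      # jump taken
-            elif op[0] == 'JL' and rev < op[1]:
-                pc = op[2]                      # jump taken
-            else:
-                if op[0] == 'LINE':
-                    out.append((op[1], op[2]))  # (rev, line number)
-                pc += 1
-        return out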
-
-1.3 Comparing with Mercurial's revlog format
-
-  On the surface, linelog is very different from revlog: linelog stores rev and
-  line numbers, while revlog has line contents and other metadata (like
-  parents, flags). However, the revlog format could also be used to store rev
-  and line numbers. For example, to speed up the annotate operation, we could
-  also pre-calculate annotate results and just store them using the revlog
-  format.
-
-  Therefore, linelog is actually somewhat similar to revlog, with the important
-  trade-off that it only supports linear history (mentioned in section 0.1).
-  Essentially, the differences are:
-
-    a) Linelog is full of deltas, while revlog could contain full file
-       contents sometimes. So linelog is smaller. Revlog could trade
-       reconstruction speed for file size - best case, revlog is as small as
-       linelog.
-    b) The interleaved delta structure allows skipping large portions of
-       uninteresting deltas so linelog's content reconstruction is faster than
-       the delta-only version of revlog (however it's possible to construct
-       a case where interleaved deltas degrade to plain deltas, so linelog
-       worst case would be delta-only revlog). Revlog could trade file size
-       for reconstruction speed.
-    c) Linelog implicitly maintains the order of all lines it stores. So it
-       could dump all the lines from all revisions, with a reasonable order.
-       While revlog could also dump all line additions, it requires extra
-       computation to figure out the order in which to put those lines -
-       essentially a kind of "merge".
-
-  "c" makes "hg absorb" easier to implement and makes it possible to do
-  "annotate --deleted".
-
-1.4 Malformed Cases Handling
-
-  The following "case 1", "case 2", and "case 3" refer to cases mentioned
-  in section 0.5.
-
-  Using the exposed API (replacelines), case 1 is impossible to generate,
-  although it's possible to generate it by constructing rawdata and loading
-  that via linelog.fromdata.
-
-  Doing annotate(maxrev) before replacelines (i.e. the a1, a2 passed to
-  replacelines refer to the latest revision) eliminates the possibility
-  of case 3. That makes sense since usually you'd like to make edits on top of
-  the latest revision. Practically, both absorb and fastannotate do this.
-
-  Doing annotate(maxrev), plus replacelines(rev, ...) where rev >= maxrev
-  eliminates the possibility of case 2. That makes sense since usually the
-  edits belong to "new revisions", not "old revisions". Practically,
-  fastannotate does this. Absorb calls replacelines with rev < maxrev to edit
-  past revisions. So it needs some extra care to not generate case 2.
-
-  If case 1 occurs, that probably means linelog file corruption (assuming
-  linelog is edited via public APIs). The checkout or annotate result could
-  be less meaningful or even error out, but linelog wouldn't enter an
-  infinite loop.
-
-  If either case 2 or 3 occurs, linelog works as if the inner "^AI/D" and "^AE"
-  operations on the left side are silently ignored.
--- a/mercurial/help/internals/mergestate.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-The active mergestate is stored in ``.hg/merge`` when a merge is triggered
-by commands like ``hg merge``, ``hg rebase``, etc. until the merge is
-completed or aborted to track the 3-way merge state of individual files.
-
-The contents of the directory are:
-
-Conflicting files
------------------
-
-The local version of the conflicting files are stored with their
-filenames as the hash of their paths.
-
-state
------
-
-This mergestate file record is used by hg versions prior to 2.9.1
-and contains less data than ``state2``. If there is no contradiction
-with ``state2``, we can assume that both are written at the same time.
-In this case, data from ``state2`` is used. Otherwise, we use ``state``.
-We read/write both ``state`` and ``state2`` records to ensure backward
-compatibility.
-
-state2
-------
-
-This record stores a superset of data in ``state``, including new kinds
-of records in the future.
-
-Each record can contain arbitrary content and has an associated type. This
-`type` should be a letter. If `type` is uppercase, the record is mandatory:
-versions of Mercurial that don't support it should abort. If `type` is
-lowercase, the record can be safely ignored.
-
-Currently known records:
-
-| * L: the node of the "local" part of the merge (hexified version)
-| * O: the node of the "other" part of the merge (hexified version)
-| * F: a file to be merged entry
-| * C: a change/delete or delete/change conflict
-| * D: a file that the external merge driver will merge internally
-|      (experimental)
-| * P: a path conflict (file vs directory)
-| * m: the external merge driver defined for this merge plus its run state
-|      (experimental)
-| * f: a (filename, dictionary) tuple of optional values for a given file
-| * X: unsupported mandatory record type (used in tests)
-| * x: unsupported advisory record type (used in tests)
-| * l: the labels for the parts of the merge.
-
-Merge driver run states (experimental):
-
-| * u: driver-resolved files unmarked -- needs to be run next time we're
-|      about to resolve or commit
-| * m: driver-resolved files marked -- only needs to be run before commit
-| * s: success/skipped -- does not need to be run any more
-
-Merge record states (indexed by filename):
-
-| * u: unresolved conflict
-| * r: resolved conflict
-| * pu: unresolved path conflict (file conflicts with directory)
-| * pr: resolved path conflict
-| * d: driver-resolved conflict
-
-The resolve command transitions between 'u' and 'r' for conflicts and
-'pu' and 'pr' for path conflicts.
-
-This format is a list of arbitrary records of the form:
-
-[type][length][content]
-
-`type` is a single character, `length` is a 4 byte integer, and
-`content` is an arbitrary byte sequence of length `length`.
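-
-A minimal Python sketch of a reader (the helpers are ours, not Mercurial's
-API; the known record types are those listed above)::
-
-    import struct
-
-    def iterrecords(data):
-        # walk the concatenated [type][length][content] records
-        pos = 0
-        while pos < len(data):
-            rtype = data[pos:pos + 1]  # single character record type
-            (length,) = struct.unpack('>I', data[pos + 1:pos + 5])
-            yield rtype, data[pos + 5:pos + 5 + length]
-            pos += 5 + length
-
-    def checkmandatory(data, known=b'LOFCDPX'):
-        # uppercase records are mandatory: readers must abort on ones
-        # they do not understand; lowercase records may be ignored
-        for rtype, content in iterrecords(data):
-            if rtype.isupper() and rtype not in known:
-                raise ValueError('unsupported merge record: %r' % rtype)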
-
-Mercurial versions prior to 3.7 have a bug where if there are
-unsupported mandatory merge records, attempting to clear out the merge
-state with hg update --clean or similar aborts. The 't' record type
-works around that by writing out what those versions treat as an
-advisory record, but which later versions interpret as special: the first
-character is the 'real' record type and everything onwards is the data.
--- a/mercurial/help/internals/requirements.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,144 +0,0 @@
-Repositories contain a file (``.hg/requires``) containing a list of
-features/capabilities that are *required* for clients to interface
-with the repository. This file has been present in Mercurial since
-version 0.9.2 (released December 2006).
-
-One of the first things clients do when opening a repository is read
-``.hg/requires`` and verify that all listed requirements are supported,
-aborting if not. Requirements are therefore a strong mechanism to
-prevent incompatible clients from reading from unknown repository
-formats or even corrupting them by writing to them.
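-
-A minimal Python sketch of that check (the helper is ours, not Mercurial's
-API)::
-
-    import os
-
-    def checkrequirements(repopath, supported):
-        # read .hg/requires and abort on any requirement we don't support
-        path = os.path.join(repopath, '.hg', 'requires')
-        try:
-            with open(path) as fh:
-                requirements = set(fh.read().splitlines())
-        except IOError:
-            return set()  # very old layout: no requires file
-        missing = requirements - set(supported)
-        if missing:
-            raise RuntimeError('repository requires features unknown to '
-                               'this client: %s' % ', '.join(sorted(missing)))
-        return requirements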
-
-Extensions may add requirements. When they do this, clients not running
-an extension will be unable to read from repositories.
-
-The following sections describe the requirements defined by the
-Mercurial core distribution.
-
-revlogv1
-========
-
-When present, revlogs are version 1 (RevlogNG). RevlogNG was introduced
-in 2006. The ``revlogv1`` requirement has been enabled by default
-since the ``requires`` file was introduced in Mercurial 0.9.2.
-
-If this requirement is not present, version 0 revlogs are assumed.
-
-store
-=====
-
-The *store* repository layout should be used.
-
-This requirement has been enabled by default since the ``requires`` file
-was introduced in Mercurial 0.9.2.
-
-fncache
-=======
-
-The *fncache* repository layout should be used.
-
-The *fncache* layout hash encodes filenames with long paths and
-encodes reserved filenames.
-
-This requirement is enabled by default when the *store* requirement is
-enabled (which is the default behavior). It was introduced in Mercurial
-1.1 (released December 2008).
-
-shared
-======
-
-Denotes that the store for a repository is shared from another location
-(defined by the ``.hg/sharedpath`` file).
-
-This requirement is set when a repository is created via :hg:`share`.
-
-The requirement was added in Mercurial 1.3 (released July 2009).
-
-relshared
-=========
-
-Derivative of ``shared``; the location of the store is relative to the
-store of this repository.
-
-This requirement is set when a repository is created via :hg:`share`
-using the ``--relative`` option.
-
-The requirement was added in Mercurial 4.2 (released May 2017).
-
-dotencode
-=========
-
-The *dotencode* repository layout should be used.
-
-The *dotencode* layout encodes the first period or space in filenames
-to prevent issues on OS X and Windows.
-
-This requirement is enabled by default when the *store* requirement
-is enabled (which is the default behavior). It was introduced in
-Mercurial 1.7 (released November 2010).
-
-parentdelta
-===========
-
-Denotes a revlog delta encoding format that was experimental and
-replaced by *generaldelta*. It should not be seen in the wild because
-it was never enabled by default.
-
-This requirement was added in Mercurial 1.7 and removed in Mercurial
-1.9.
-
-generaldelta
-============
-
-Revlogs should be created with the *generaldelta* flag enabled. The
-generaldelta flag will cause deltas to be encoded against a parent
-revision instead of the previous revision in the revlog.
-
-Support for this requirement was added in Mercurial 1.9 (released
-July 2011). The requirement was disabled on new repositories by
-default until Mercurial 3.7 (released February 2016).
-
-manifestv2
-==========
-
-Denotes that version 2 of manifests are being used.
-
-Support for this requirement was added in Mercurial 3.4 (released
-May 2015). The new format failed to meet expectations and support
-for the format and requirement were removed in Mercurial 4.6
-(released May 2018) since the feature never graduated from experimental
-status.
-
-treemanifest
-============
-
-Denotes that tree manifests are being used. Tree manifests are
-one manifest per directory (as opposed to a single flat manifest).
-
-Support for this requirement was added in Mercurial 3.4 (released
-May 2015). The requirement is currently experimental and is
-disabled by default.
-
-exp-sparse
-==========
-
-The working directory is sparse (only contains a subset of files).
-
-Support for this requirement was added in Mercurial 4.3 (released
-August 2017). This requirement and feature are experimental and may
-disappear in a future Mercurial release. The requirement will only
-be present on repositories that have opted in to a sparse working
-directory.
-
-bookmarksinstore
-================
-
-Bookmarks are stored in ``.hg/store/`` instead of directly in ``.hg/``
-where they used to be stored. The active bookmark is still stored
-directly in ``.hg/``. This makes them always shared by ``hg share``,
-whether or not ``-B`` was passed.
-
-Support for this requirement was added in Mercurial 5.1 (released
-August 2019). The requirement will only be present on repositories
-that have opted in to this format (by having
-``format.bookmarks-in-store=true`` set when they were created).
--- a/mercurial/help/internals/revlogs.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,239 +0,0 @@
-Revision logs - or *revlogs* - are an append only data structure for
-storing discrete entries, or *revisions*. They are the primary storage
-mechanism of repository data.
-
-Revlogs effectively model a directed acyclic graph (DAG). Each node
-has edges to 1 or 2 *parent* nodes. Each node contains metadata and
-the raw value for that node.
-
-Revlogs consist of entries which have metadata and revision data.
-Metadata includes the hash of the revision's content, sizes, and
-links to its *parent* entries. The collective metadata is referred
-to as the *index* and the revision data is the *data*.
-
-Revision data is stored as a series of compressed deltas against
-ancestor revisions.
-
-Revlogs are written in an append-only fashion. We never need to rewrite
-a file to insert data, nor do we need to remove data. Rolling back in-progress
-writes can be performed by truncating files. Read locks can be avoided
-using simple techniques. This means that references to other data in
-the same revlog *always* refer to a previous entry.
-
-Revlogs can be modeled as 0-indexed arrays. The first revision is
-revision #0 and the second is revision #1. The revision -1 is typically
-used to mean *does not exist* or *not defined*.
-
-File Format
-===========
-
-A revlog begins with a 32-bit big endian integer holding version info
-and feature flags. This integer overlaps with the first four bytes of
-the first revision entry.
-
-This integer is logically divided into 2 16-bit shorts. The least
-significant half of the integer is the format/version short. The other
-short holds feature flags that dictate behavior of the revlog.
-
-The following values for the format/version short are defined:
-
-0
-   The original revlog version.
-1
-   RevlogNG (*next generation*). It replaced version 0 when it was
-   implemented in 2006.
-2
-   In-development version incorporating accumulated knowledge and
-   missing features from 10+ years of revlog version 1.
-57005 (0xdead)
-   Reserved for internal testing of new versions. No defined format
-   beyond 32-bit header.
-
-The feature flags short consists of bit flags, where 0 is the least
-significant bit. The bit flags vary by revlog version.
-
-Version 0 revlogs have no defined flags and the presence of a flag
-is considered an error.
-
-Version 1 revlogs have the following flags at the specified bit offsets:
-
-0
-   Store revision data inline.
-1
-   Generaldelta encoding.
-
-Version 2 revlogs have the following flags at the specified bit offsets:
-
-0
-   Store revision data inline.
-
-The following header values are common:
-
-00 00 00 01
-   v1
-00 01 00 01
-   v1 + inline
-00 02 00 01
-   v1 + generaldelta
-00 03 00 01
-   v1 + inline + generaldelta
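-
-A minimal Python sketch of decoding the header (the helper is ours, not
-Mercurial's API)::
-
-    import struct
-
-    FLAG_INLINE_DATA = 1 << 0   # bit 0 of the feature flags short
-    FLAG_GENERALDELTA = 1 << 1  # bit 1 (version 1 only)
-
-    def parseheader(first4):
-        # split the 32-bit header into (version, feature flags)
-        (header,) = struct.unpack('>I', first4)
-        return header & 0xFFFF, header >> 16
-
-    # parseheader(b'\x00\x03\x00\x01')
-    # == (1, FLAG_INLINE_DATA | FLAG_GENERALDELTA)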
-
-Following the 32-bit header is the remaining 60 bytes of the first index
-entry. Following that are additional *index* entries. Inlined revision
-data is possibly located between index entries. More on this inlined
-layout is described below.
-
-Version 1 Format
-================
-
-Version 1 (RevlogNG) begins with an index describing the revisions in
-the revlog. If the ``inline`` flag is set, revision data is stored inline,
-or between index entries (as opposed to in a separate container).
-
-Each index entry is 64 bytes. The byte layout of each entry is as
-follows, with byte 0 being the first byte (all data stored as big endian;
-a parsing sketch follows the field list):
-
-0-3 (4 bytes) (rev 0 only)
-   Revlog header
-
-0-5 (6 bytes)
-   Absolute offset of revision data from beginning of revlog.
-
-6-7 (2 bytes)
-   Bit flags impacting revision behavior. The following bit offsets define:
-
-   0: REVIDX_ISCENSORED revision has censor metadata, must be verified.
-
-   1: REVIDX_ELLIPSIS revision hash does not match its data. Used by
-   narrowhg
-
-   2: REVIDX_EXTSTORED revision data is stored externally.
-
-8-11 (4 bytes)
-   Compressed length of revision data / chunk as stored in revlog.
-
-12-15 (4 bytes)
-   Uncompressed length of revision data. This is the size of the full
-   revision data, not the size of the chunk post decompression.
-
-16-19 (4 bytes)
-   Base or previous revision this revision's delta was produced against.
-   This revision holds full text (as opposed to a delta) if it points to
-   itself. For generaldelta repos, this is the previous revision in the
-   delta chain. For non-generaldelta repos, this is the base or first
-   revision in the delta chain.
-
-20-23 (4 bytes)
-   A revision this revision is *linked* to. This allows a revision in
-   one revlog to be forever associated with a revision in another
-   revlog. For example, a file's revlog may point to the changelog
-   revision that introduced it.
-
-24-27 (4 bytes)
-   Revision of 1st parent. -1 indicates no parent.
-
-28-31 (4 bytes)
-   Revision of 2nd parent. -1 indicates no 2nd parent.
-
-32-63 (32 bytes)
-   Hash of revision's full text. Currently, SHA-1 is used and only
-   the first 20 bytes of this field are used. The rest of the bytes
-   are ignored and should be stored as \0.
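-
-A minimal Python sketch of unpacking one such entry (the helper is ours,
-not Mercurial's API; for revision 0, remember that the first 4 bytes
-overlap with the revlog header)::
-
-    import struct
-
-    INDEX_ENTRY_V1 = struct.Struct('>Qiiiiii20s12x')  # 64 bytes
-
-    def parseindexentry(entry):
-        # the first 8 bytes pack the 6-byte offset and 2-byte flags
-        (offset_flags, comp, uncomp, base, link,
-         p1, p2, node) = INDEX_ENTRY_V1.unpack(entry)
-        offset = offset_flags >> 16
-        flags = offset_flags & 0xFFFF
-        return offset, flags, comp, uncomp, base, link, p1, p2, node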
-
-If inline revision data is being stored, the compressed revision data
-(of length from bytes offset 8-11 from the index entry) immediately
-follows the index entry. There is no header on the revision data. There
-is no padding between it and the index entries before and after.
-
-If revision data is not inline, then raw revision data is stored in a
-separate byte container. The offsets from bytes 0-5 and the compressed
-length from bytes 8-11 define how to access this data.
-
-The 6 byte absolute offset field from the first revlog entry overlaps
-with the revlog header. That is, the first 6 bytes of the first revlog
-entry can be split into four bytes containing the header for the revlog
-file and an additional two bytes containing the offset for the first
-entry. Since this is the offset from the beginning of the file for the
-first revision entry, the two bytes will always be set to zero.
-
-Version 2 Format
-================
-
-(In development. Format not finalized or stable.)
-
-Version 2 is identical to version 1 with the following differences.
-
-There is no dedicated *generaldelta* revlog format flag. Instead,
-the feature is implied enabled by default.
-
-Delta Chains
-============
-
-Revision data is encoded as a chain of *chunks*. Each chain begins with
-the compressed original full text for that revision. Each subsequent
-*chunk* is a *delta* against the previous revision. We therefore call
-these chains of chunks/deltas *delta chains*.
-
-The full text for a revision is reconstructed by loading the original
-full text for the base revision of a *delta chain* and then applying
-*deltas* until the target revision is reconstructed.
-
-*Delta chains* are limited in length so lookup time is bounded. They are
-limited to ~2x the length of the revision's data. The linear distance
-between the base chunk and the final chunk is also limited so the
-amount of read I/O to load all chunks in the delta chain is bounded.
-
-Deltas and delta chains are either computed against the previous
-revision in the revlog or another revision (almost certainly one of
-the parents of the revision). Historically, deltas were computed against
-the previous revision. The *generaldelta* revlog feature flag (enabled
-by default in Mercurial 3.7) activates the mode where deltas are
-computed against an arbitrary revision (almost certainly a parent revision).
-
-File Storage
-============
-
-Revlogs logically consist of an index (metadata of entries) and
-revision data. This data may be stored together in a single file or in
-separate files. The mechanism used is indicated by the ``inline`` feature
-flag on the revlog.
-
-Mercurial's behavior is to use inline storage until a revlog reaches a
-certain size, at which point it will be converted to non-inline. The
-reason there is a size limit on inline storage is to establish an upper
-bound on how much data must be read to load the index. It would be a waste
-to read tens or hundreds of extra megabytes of data just to access the
-index data.
-
-The actual layout of revlog files on disk is governed by the repository's
-*store format*. Typically, a ``.i`` file represents the index revlog
-(possibly containing inline data) and a ``.d`` file holds the revision data.
-
-Revision Entries
-================
-
-Revision entries consist of an optional 1 byte header followed by an
-encoding of the revision data. The headers are as follows:
-
-\0 (0x00)
-   Revision data is the entirety of the entry, including this header.
-u (0x75)
-   Raw revision data follows.
-x (0x78)
-   zlib (RFC 1950) data.
-
-   The 0x78 value is actually the first byte of the zlib header (CMF byte).
-
-Hash Computation
-================
-
-The hash of the revision is stored in the index and is used both as a primary
-key and for data integrity verification.
-
-Currently, SHA-1 is the only supported hashing algorithm. To obtain the SHA-1
-hash of a revision:
-
-1. Hash the parent nodes
-2. Hash the fulltext of the revision
-
-The 20 byte node ids of the parents are fed into the hasher in ascending order.
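-
-A minimal Python sketch (the helper is ours, not Mercurial's API)::
-
-    import hashlib
-
-    def hashrevision(p1, p2, text):
-        # the node id is the SHA-1 of the two 20-byte parent nodes, fed
-        # in ascending order, followed by the revision fulltext
-        s = hashlib.sha1(min(p1, p2))
-        s.update(max(p1, p2))
-        s.update(text)
-        return s.digest()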
--- a/mercurial/help/internals/wireprotocol.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1277 +0,0 @@
-The Mercurial wire protocol is a request-response based protocol
-with multiple wire representations.
-
-Each request is modeled as a command name, a dictionary of arguments, and
-optional raw input. Command arguments and their types are intrinsic
-properties of commands. So is the response type of the command. This means
-clients can't always send arbitrary arguments to servers and servers can't
-return multiple response types.
-
-The protocol is synchronous and does not support multiplexing (concurrent
-commands).
-
-Handshake
-=========
-
-It is required or common for clients to perform a *handshake* when connecting
-to a server. The handshake serves the following purposes:
-
-* Negotiating protocol/transport level options
-* Allows the client to learn about server capabilities to influence
-  future requests
-* Ensures the underlying transport channel is in a *clean* state
-
-An important goal of the handshake is to allow clients to use more modern
-wire protocol features. By default, clients must assume they are talking
-to an old version of Mercurial server (possibly even the very first
-implementation). So, clients should not attempt to call or utilize modern
-wire protocol features until they have confirmation that the server
-supports them. The handshake implementation is designed to allow both
-ends to utilize the latest set of features and capabilities with as
-few round trips as possible.
-
-The handshake mechanism varies by transport and protocol and is documented
-in the sections below.
-
-HTTP Protocol
-=============
-
-Handshake
----------
-
-The client sends a ``capabilities`` command request (``?cmd=capabilities``)
-as soon as HTTP requests may be issued.
-
-By default, the server responds with a version 1 capabilities string, which
-the client parses to learn about the server's abilities. The ``Content-Type``
-for this response is ``application/mercurial-0.1`` or
-``application/mercurial-0.2`` depending on whether the client advertised
-support for version ``0.2`` in its request. (Clients aren't supposed to
-advertise support for ``0.2`` until the capabilities response indicates
-the server's support for that media type. However, a client could
-conceivably cache this metadata and issue the capabilities request in such
-a way to elicit an ``application/mercurial-0.2`` response.)
-
-Clients wishing to switch to a newer API service may send an
-``X-HgUpgrade-<X>`` header containing a space-delimited list of API service
-names the client is capable of speaking. The request MUST also include an
-``X-HgProto-<X>`` header advertising a known serialization format for the
-response. ``cbor`` is currently the only defined serialization format.
-
-If the request contains these headers, the response ``Content-Type`` MAY
-be for a different media type. e.g. ``application/mercurial-cbor`` if the
-client advertises support for CBOR.
-
-The response MUST be deserializable to a map with the following keys:
-
-apibase
-   URL path to API services, relative to the repository root. e.g. ``api/``.
-
-apis
-   A map of API service names to API descriptors. An API descriptor contains
-   more details about that API. In the case of the HTTP Version 2 Transport,
-   it will be the normal response to a ``capabilities`` command.
-
-   Only the services advertised by the client that are also available on
-   the server are advertised.
-
-v1capabilities
-   The capabilities string that would be returned by a version 1 response.
-
-The client can then inspect the server-advertised APIs and decide which
-API to use, including continuing to use the HTTP Version 1 Transport.
-
-HTTP Version 1 Transport
-------------------------
-
-Commands are issued as HTTP/1.0 or HTTP/1.1 requests. Commands are
-sent to the base URL of the repository with the command name sent in
-the ``cmd`` query string parameter. e.g.
-``https://example.com/repo?cmd=capabilities``. The HTTP method is ``GET``
-or ``POST`` depending on the command and whether there is a request
-body.
-
-Command arguments can be sent multiple ways.
-
-The simplest is part of the URL query string using ``x-www-form-urlencoded``
-encoding (see Python's ``urllib.urlencode()``). However, many servers impose
-length limitations on the URL. So this mechanism is typically only used if
-the server doesn't support other mechanisms.
-
-If the server supports the ``httpheader`` capability, command arguments can
-be sent in HTTP request headers named ``X-HgArg-<N>`` where ``<N>`` is an
-integer starting at 1. A ``x-www-form-urlencoded`` representation of the
-arguments is obtained. This full string is then split into chunks and sent
-in numbered ``X-HgArg-<N>`` headers. The maximum length of each HTTP header
-is defined by the server in the ``httpheader`` capability value, which defaults
-to ``1024``. The server reassembles the encoded arguments string by
-concatenating the ``X-HgArg-<N>`` headers then URL decodes them into a
-dictionary.
-
-The list of ``X-HgArg-<N>`` headers should be added to the ``Vary`` request
-header to instruct caches to take these headers into consideration when caching
-requests.
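-
-As an illustration only (``encodehgargs`` is our name, not a Mercurial
-API), splitting an encoded arguments string into headers could look
-like this, assuming the limit covers the full header line::
-
-    import urllib.parse
-
-    def encodehgargs(args, headerlen=1024):
-        # URL encode all arguments into one string, then slice it
-        # into numbered X-HgArg-<N> headers whose name, separator,
-        # and value together stay within the advertised limit.
-        encoded = urllib.parse.urlencode(args)
-        headers = []
-        n = 1
-        while encoded:
-            name = 'X-HgArg-%d' % n
-            room = headerlen - len(name) - len(': ')
-            headers.append((name, encoded[:room]))
-            encoded = encoded[room:]
-            n += 1
-        return headers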
-
-If the server supports the ``httppostargs`` capability, the client
-may send command arguments in the HTTP request body as part of an
-HTTP POST request. The command arguments will be URL encoded just like
-they would for sending them via HTTP headers. However, no splitting is
-performed: the raw arguments are included in the HTTP request body.
-
-The client sends a ``X-HgArgs-Post`` header with the string length of the
-encoded arguments data. Additional data may be included in the HTTP
-request body immediately following the argument data. The offset of the
-non-argument data is defined by the ``X-HgArgs-Post`` header. The
-``X-HgArgs-Post`` header is not required if there is no argument data.
-
-Additional command data can be sent as part of the HTTP request body. The
-default ``Content-Type`` when sending data is ``application/mercurial-0.1``.
-A ``Content-Length`` header is currently always sent.
-
-Example HTTP requests::
-
-    GET /repo?cmd=capabilities
-    X-HgArg-1: foo=bar&baz=hello%20world
-
-The request media type should be chosen based on server support. If the
-``httpmediatype`` server capability is present, the client should send
-the newest mutually supported media type. If this capability is absent,
-the client must assume the server only supports the
-``application/mercurial-0.1`` media type.
-
-The ``Content-Type`` HTTP response header identifies the response as coming
-from Mercurial and can also be used to signal an error has occurred.
-
-The ``application/mercurial-*`` media types indicate a generic Mercurial
-data type.
-
-The ``application/mercurial-0.1`` media type is raw Mercurial data. It is the
-predecessor of the format below.
-
-The ``application/mercurial-0.2`` media type is compression framed Mercurial
-data. The first byte of the payload indicates the length of the compression
-format identifier that follows. Next are N bytes indicating the compression
-format. e.g. ``zlib``. The remaining bytes are compressed according to that
-compression format. The decompressed data behaves the same as with
-``application/mercurial-0.1``.
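-
-A minimal decoding sketch (the helper name is illustrative; only the
-``zlib`` and ``none`` formats are handled)::
-
-    import zlib
-
-    def decode02(payload):
-        # First byte: length of the compression format identifier.
-        namelen = payload[0]
-        compformat = payload[1:1 + namelen]
-        data = payload[1 + namelen:]
-        if compformat == b'zlib':
-            return zlib.decompress(data)
-        if compformat == b'none':
-            return data
-        raise ValueError('unsupported compression: %r' % compformat)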
-
-The ``application/hg-error`` media type indicates a generic error occurred.
-The content of the HTTP response body typically holds text describing the
-error.
-
-The ``application/mercurial-cbor`` media type indicates a CBOR payload
-and should be interpreted as identical to ``application/cbor``.
-
-Behavior of media types is further described in the ``Content Negotiation``
-section below.
-
-Clients should issue a ``User-Agent`` request header that identifies the client.
-The server should not use the ``User-Agent`` for feature detection.
-
-A command returning a ``string`` response issues an
-``application/mercurial-0.*`` media type and the HTTP response body contains
-the raw string value (after compression decoding, if used). A
-``Content-Length`` header is typically issued, but not required.
-
-A command returning a ``stream`` response issues an
-``application/mercurial-0.*`` media type and the HTTP response typically
-uses *chunked transfer* (``Transfer-Encoding: chunked``).
-
-HTTP Version 2 Transport
-------------------------
-
-**Experimental - feature under active development**
-
-Version 2 of the HTTP protocol is exposed under the ``/api/*`` URL space.
-Its final API name is not yet formalized.
-
-Commands are triggered by sending HTTP POST requests against URLs of the
-form ``<permission>/<command>``, where ``<permission>`` is ``ro`` or
-``rw``, meaning read-only and read-write, respectively, and ``<command>``
-is a named wire protocol command.
-
-Non-POST request methods MUST be rejected by the server with an HTTP
-405 response.
-
-Commands that modify repository state in meaningful ways MUST NOT be
-exposed under the ``ro`` URL prefix. All available commands MUST be
-available under the ``rw`` URL prefix.
-
-Server administrators MAY implement blanket HTTP authentication keyed
-off the URL prefix. For example, a server may require authentication
-for all ``rw/*`` URLs and let unauthenticated requests to ``ro/*``
-URLs proceed. A server MAY issue an HTTP 401, 403, or 407 response
-in accordance with RFC 7235. Clients SHOULD recognize the HTTP Basic
-(RFC 7617) and Digest (RFC 7616) authentication schemes. Clients SHOULD
-make an attempt to recognize unknown schemes using the
-``WWW-Authenticate`` response header on a 401 response, as defined by
-RFC 7235.
-
-Read-only commands are accessible under ``rw/*`` URLs so clients can
-signal the intent of the operation very early in the connection
-lifecycle. For example, a ``push`` operation - which consists of
-various read-only commands mixed with at least one read-write command -
-can perform all commands against ``rw/*`` URLs so that any server-side
-authentication requirements are discovered upon attempting the first
-command - not potentially several commands into the exchange. This
-allows clients to fail faster or prompt for credentials as soon as the
-exchange takes place. This provides a better end-user experience.
-
-Requests to unknown commands or URLs result in an HTTP 404.
-TODO formally define response type, how error is communicated, etc.
-
-HTTP request and response bodies use the ``hgrpc`` protocol for media
-exchange. (See :hg:`help internals.wireprotocolrpc` for details of
-the protocol.) The entirety of the HTTP message body is 0 or more frames
-as defined by this protocol.
-
-Clients and servers MUST advertise the ``TBD`` media type via the
-``Content-Type`` request and response headers. In addition, clients MUST
-advertise this media type value in their ``Accept`` request header in all
-requests.
-TODO finalize the media type. For now, it is defined in wireprotoserver.py.
-
-Servers receiving requests without an ``Accept`` header SHOULD respond with
-an HTTP 406.
-
-Servers receiving requests with an invalid ``Content-Type`` header SHOULD
-respond with an HTTP 415.
-
-The command to run is specified in the POST payload as defined by ``hgrpc``.
-This is redundant with data already encoded in the URL. This is by design,
-so server operators can better understand server activity by looking
-only at HTTP access logs.
-
-In most circumstances, the command specified in the URL MUST match
-the command specified in the frame-based payload or the server will
-respond with an error. The exception to this is the special
-``multirequest`` URL. (See below.) In addition, HTTP requests
-are limited to one command invocation. The exception is the special
-``multirequest`` URL.
-
-The ``multirequest`` command endpoints (``ro/multirequest`` and
-``rw/multirequest``) are special in that they allow the execution of
-*any* command and allow the execution of multiple commands. If the
-HTTP request issues multiple commands across multiple frames, all
-issued commands will be processed by the server. Per the defined
-behavior of ``hgrpc``, commands may be issued interleaved and responses
-may come back in a different order than they were issued. Clients MUST
-be able to deal with this.
-
-SSH Protocol
-============
-
-Handshake
----------
-
-For all clients, the handshake consists of the client sending 1 or more
-commands to the server using version 1 of the transport. Servers respond
-to commands they know how to respond to and send an empty response (``0\n``)
-for unknown commands (per standard behavior of version 1 of the transport).
-Clients then typically look for a response to the newest sent command to
-determine which transport version to use and what the available features for
-the connection and server are.
-
-Preceding any response from client-issued commands, the server may print
-non-protocol output. It is common for SSH servers to print banners, message
-of the day announcements, etc when clients connect. It is assumed that any
-such *banner* output will precede any Mercurial server output. So clients
-must be prepared to handle server output on initial connect that isn't
-in response to any client-issued command and doesn't conform to Mercurial's
-wire protocol. This *banner* output should only be on stdout. However,
-some servers may send output on stderr.
-
-Pre 0.9.1 clients issue a ``between`` command with the ``pairs`` argument
-having the value
-``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``.
-
-The ``between`` command has been supported since the original Mercurial
-SSH server. Requesting the empty range will return a ``\n`` string response,
-which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline
-followed by the value, which happens to be a newline).
-
-For pre 0.9.1 clients and all servers, the exchange looks like::
-
-   c: between\n
-   c: pairs 81\n
-   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-   s: 1\n
-   s: \n
-
-0.9.1+ clients send a ``hello`` command (with no arguments) before the
-``between`` command. The response to this command allows clients to
-discover server capabilities and settings.
-
-An example exchange between 0.9.1+ clients and a ``hello`` aware server looks
-like::
-
-   c: hello\n
-   c: between\n
-   c: pairs 81\n
-   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-   s: 324\n
-   s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
-   s: 1\n
-   s: \n
-
-And a similar scenario but with servers sending a banner on connect::
-
-   c: hello\n
-   c: between\n
-   c: pairs 81\n
-   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-   s: welcome to the server\n
-   s: if you find any issues, email someone@somewhere.com\n
-   s: 324\n
-   s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
-   s: 1\n
-   s: \n
-
-Note that output from the ``hello`` command is terminated by a ``\n``. This is
-part of the response payload and not part of the wire protocol adding a newline
-after responses. In other words, the length of the response contains the
-trailing ``\n``.
-
-Clients supporting version 2 of the SSH transport send a line beginning
-with ``upgrade`` before the ``hello`` and ``between`` commands. The line
-(which isn't a well-formed command line because it doesn't consist of a
-single command name) serves to both communicate the client's intent to
-switch to transport version 2 (transports are version 1 by default) as
-well as to advertise the client's transport-level capabilities so the
-server may satisfy that request immediately.
-
-The upgrade line has the form::
-
-    upgrade <token> <transport capabilities>
-
-That is the literal string ``upgrade`` followed by a space, followed by
-a randomly generated string, followed by a space, followed by a string
-denoting the client's transport capabilities.
-
-The token can be anything. However, a random UUID is recommended. (Use
-of version 4 UUIDs is recommended because version 1 UUIDs can leak the
-client's MAC address.)
-
-The transport capabilities string is a URL/percent encoded string
-containing key-value pairs defining the client's transport-level
-capabilities. The following capabilities are defined:
-
-proto
-   A comma-delimited list of transport protocol versions the client
-   supports. e.g. ``ssh-v2``.
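-
-For example, a client could construct the upgrade line like this
-(an illustrative sketch, not Mercurial's implementation)::
-
-    import urllib.parse
-    import uuid
-
-    def upgradeline(protos=('ssh-v2',)):
-        # The random token lets the client later match the server's
-        # "upgraded" reply and tell it apart from banner output.
-        token = str(uuid.uuid4())
-        caps = urllib.parse.urlencode({'proto': ','.join(protos)})
-        return 'upgrade %s %s\n' % (token, caps), token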
-
-If the server does not recognize the ``upgrade`` line, it should issue
-an empty response and continue processing the ``hello`` and ``between``
-commands. Here is an example handshake between a version 2 aware client
-and a non version 2 aware server::
-
-   c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
-   c: hello\n
-   c: between\n
-   c: pairs 81\n
-   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-   s: 0\n
-   s: 324\n
-   s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
-   s: 1\n
-   s: \n
-
-(The initial ``0\n`` line from the server indicates an empty response to
-the unknown ``upgrade ...`` command/line.)
-
-If the server recognizes the ``upgrade`` line and is willing to satisfy that
-upgrade request, it replies with a payload of the following form::
-
-   upgraded <token> <transport name>\n
-
-This line is the literal string ``upgraded``, a space, the token that was
-specified by the client in its ``upgrade ...`` request line, a space, and the
-name of the transport protocol that was chosen by the server. The transport
-name MUST match one of the names the client specified in the ``proto`` field
-of its ``upgrade ...`` request line.
-
-If a server issues an ``upgraded`` response, it MUST also read and ignore
-the lines associated with the ``hello`` and ``between`` command requests
-that were issued by the client. It is assumed that the negotiated transport
-will respond with equivalent requested information following the transport
-handshake.
-
-All data following the ``\n`` terminating the ``upgraded`` line is the
-domain of the negotiated transport. It is common for the data immediately
-following to contain additional metadata about the state of the transport and
-the server. However, this isn't strictly speaking part of the transport
-handshake and isn't covered by this section.
-
-Here is an example handshake between a version 2 aware client and a version
-2 aware server::
-
-   c:  upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
-   c:  hello\n
-   c:  between\n
-   c:  pairs 81\n
-   c:  0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-   s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
-   s: <additional transport specific data>
-
-The client-issued token that is echoed in the response provides a more
-resilient mechanism for differentiating *banner* output from Mercurial
-output. In version 1, properly formatted banner output could get confused
-for Mercurial server output. By submitting a randomly generated token
-that is then present in the response, the client can look for that token
-in response lines and have reasonable certainty that the line did not
-originate from a *banner* message.
-
-SSH Version 1 Transport
------------------------
-
-The SSH transport (version 1) is a custom text-based protocol suitable for
-use over any bi-directional stream transport. It is most commonly used with
-SSH.
-
-An SSH transport server can be started with ``hg serve --stdio``. The stdin,
-stderr, and stdout file descriptors of the started process are used to exchange
-data. When Mercurial connects to a remote server over SSH, it actually starts
-a ``hg serve --stdio`` process on the remote server.
-
-Commands are issued by sending the command name followed by a trailing newline
-``\n`` to the server. e.g. ``capabilities\n``.
-
-Command arguments are sent in the following format::
-
-    <argument> <length>\n<value>
-
-That is, the argument string name followed by a space followed by the
-integer length of the value (expressed as a string) followed by a newline
-(``\n``) followed by the raw argument value.
-
-Dictionary arguments are encoded differently::
-
-    <argument> <# elements>\n
-    <key1> <length1>\n<value1>
-    <key2> <length2>\n<value2>
-    ...
-
-Non-argument data is sent immediately after the final argument value. It is
-encoded in chunks::
-
-    <length>\n<data>
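-
-For illustration (the helper names are ours), encoding scalar and
-dictionary arguments could look like::
-
-    def encodearg(name, value):
-        # <argument> <length>\n<value>
-        return b'%s %d\n%s' % (name, len(value), value)
-
-    def encodedictarg(name, value):
-        # <argument> <# elements>\n followed by one
-        # <key> <length>\n<value> record per element.
-        parts = [b'%s %d\n' % (name, len(value))]
-        for k, v in sorted(value.items()):
-            parts.append(encodearg(k, v))
-        return b''.join(parts)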
-
-Each command declares a list of supported arguments and their types. If a
-client sends an unknown argument to the server, the server should abort
-immediately. The special argument ``*`` in a command's definition indicates
-that all argument names are allowed.
-
-The definition of supported arguments and types is initially made when a
-new command is implemented. The client and server must initially independently
-agree on the arguments and their types. This initial set of arguments can be
-supplemented through the presence of *capabilities* advertised by the server.
-
-Each command has a defined expected response type.
-
-A ``string`` response type is a length framed value. The response consists of
-the string encoded integer length of a value followed by a newline (``\n``)
-followed by the value. Empty values are allowed (and are represented as
-``0\n``).
-
-A ``stream`` response type consists of raw bytes of data. There is no framing.
-
-A generic error response type is also supported. It consists of an error
-message written to ``stderr`` followed by ``\n-\n``. In addition, ``\n`` is
-written to ``stdout``.
-
-If the server receives an unknown command, it will send an empty ``string``
-response.
-
-The server terminates if it receives an empty command (a ``\n`` character).
-
-If the server announces support for the ``protocaps`` capability, the client
-should issue a ``protocaps`` command after the initial handshake to announce
-its own capabilities. The client capabilities are persistent.
-
-SSH Version 2 Transport
------------------------
-
-**Experimental and under development**
-
-Version 2 of the SSH transport behaves identically to version 1 of the SSH
-transport with the exception of handshake semantics. See above for how
-version 2 of the SSH transport is negotiated.
-
-Immediately following the ``upgraded`` line signaling a switch to version
-2 of the SSH protocol, the server automatically sends additional details
-about the capabilities of the remote server. This has the form::
-
-   <integer length of value>\n
-   capabilities: ...\n
-
-e.g.::
-
-   s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
-   s: 240\n
-   s: capabilities: known getbundle batch ...\n
-
-Following capabilities advertisement, the peers communicate using version
-1 of the SSH transport.
-
-Capabilities
-============
-
-Servers advertise supported wire protocol features. This allows clients to
-probe for server features before blindly calling a command or passing a
-specific argument.
-
-The server's features are exposed via a *capabilities* string. This is a
-space-delimited string of tokens/features. Some features are single words
-like ``lookup`` or ``batch``. Others are complicated key-value pairs
-advertising sub-features. e.g. ``httpheader=2048``. When complex, non-word
-values are used, each feature name can define its own encoding of sub-values.
-Comma-delimited and ``x-www-form-urlencoded`` values are common.
-
-The following sections document capabilities defined by the canonical
-Mercurial server implementation.
-
-batch
------
-
-Whether the server supports the ``batch`` command.
-
-This capability/command was introduced in Mercurial 1.9 (released July 2011).
-
-branchmap
----------
-
-Whether the server supports the ``branchmap`` command.
-
-This capability/command was introduced in Mercurial 1.3 (released July 2009).
-
-bundle2-exp
------------
-
-Precursor to ``bundle2`` capability that was used before bundle2 was a
-stable feature.
-
-This capability was introduced in Mercurial 3.0 behind an experimental
-flag. This capability should not be observed in the wild.
-
-bundle2
--------
-
-Indicates whether the server supports the ``bundle2`` data exchange format.
-
-The value of the capability is a URL quoted, newline (``\n``) delimited
-list of keys or key-value pairs.
-
-A key is simply a URL encoded string.
-
-A key-value pair is a URL encoded key separated from a URL encoded value by
-an ``=``. If the value is a list, elements are delimited by a ``,`` after
-URL encoding.
-
-For example, say we have the values::
-
-  {'HG20': [], 'changegroup': ['01', '02'], 'digests': ['sha1', 'sha512']}
-
-We would first construct a string::
-
-  HG20\nchangegroup=01,02\ndigests=sha1,sha512
-
-We would then URL quote this string::
-
-  HG20%0Achangegroup%3D01%2C02%0Adigests%3Dsha1%2Csha512
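-
-Putting both steps together, an encoder sketch (the function name is
-illustrative) that reproduces the string above::
-
-    import urllib.parse
-
-    def encodebundle2caps(caps):
-        # caps maps a capability name to a list of values, e.g.
-        # {'HG20': [], 'changegroup': ['01', '02']}.
-        chunks = []
-        for key, values in sorted(caps.items()):
-            key = urllib.parse.quote(key)
-            if values:
-                values = ','.join(urllib.parse.quote(v) for v in values)
-                chunks.append('%s=%s' % (key, values))
-            else:
-                chunks.append(key)
-        # The assembled string is URL quoted once more as a whole.
-        return urllib.parse.quote('\n'.join(chunks))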
-
-This capability was introduced in Mercurial 3.4 (released May 2015).
-
-changegroupsubset
------------------
-
-Whether the server supports the ``changegroupsubset`` command.
-
-This capability was introduced in Mercurial 0.9.2 (released December
-2006).
-
-This capability was introduced at the same time as the ``lookup``
-capability/command.
-
-compression
------------
-
-Declares support for negotiating compression formats.
-
-Presence of this capability indicates the server supports dynamic selection
-of compression formats based on the client request.
-
-Servers advertising this capability are required to support the
-``application/mercurial-0.2`` media type in response to commands returning
-streams. Servers may support this media type on any command.
-
-The value of the capability is a comma-delimited list of strings declaring
-supported compression formats. The order of the compression formats is in
-server-preferred order, most preferred first.
-
-The identifiers used by the official Mercurial distribution are:
-
-bzip2
-   bzip2
-none
-   uncompressed / raw data
-zlib
-   zlib (no gzip header)
-zstd
-   zstd
-
-This capability was introduced in Mercurial 4.1 (released February 2017).
-
-getbundle
----------
-
-Whether the server supports the ``getbundle`` command.
-
-This capability was introduced in Mercurial 1.9 (released July 2011).
-
-httpheader
-----------
-
-Whether the server supports receiving command arguments via HTTP request
-headers.
-
-The value of the capability is an integer describing the max header
-length that clients should send. Clients should ignore any content after a
-comma in the value, as this is reserved for future use.
-
-This capability was introduced in Mercurial 1.9 (released July 2011).
-
-httpmediatype
--------------
-
-Indicates which HTTP media types (``Content-Type`` header) the server is
-capable of receiving and sending.
-
-The value of the capability is a comma-delimited list of strings identifying
-support for media type and transmission direction. The following strings may
-be present:
-
-0.1rx
-   Indicates server support for receiving ``application/mercurial-0.1`` media
-   types.
-
-0.1tx
-   Indicates server support for sending ``application/mercurial-0.1`` media
-   types.
-
-0.2rx
-   Indicates server support for receiving ``application/mercurial-0.2`` media
-   types.
-
-0.2tx
-   Indicates server support for sending ``application/mercurial-0.2`` media
-   types.
-
-minrx=X
-   Minimum media type version the server is capable of receiving. Value is a
-   string like ``0.2``.
-
-   This capability can be used by servers to limit connections from legacy
-   clients not using the latest supported media type. However, only clients
-   with knowledge of this capability will know to consult this value. This
-   capability is present so the client may issue a more user-friendly error
-   when the server has locked out a legacy client.
-
-mintx=X
-   Minimum media type version the server is capable of sending. Value is a
-   string like ``0.1``.
-
-Servers advertising support for the ``application/mercurial-0.2`` media type
-should also advertise the ``compression`` capability.
-
-This capability was introduced in Mercurial 4.1 (released February 2017).
-
-httppostargs
-------------
-
-**Experimental**
-
-Indicates that the server supports and prefers clients send command arguments
-via an HTTP POST request as part of the request body.
-
-This capability was introduced in Mercurial 3.8 (released May 2016).
-
-known
------
-
-Whether the server supports the ``known`` command.
-
-This capability/command was introduced in Mercurial 1.9 (released July 2011).
-
-lfs
----
-
-Indicates that the LFS extension is enabled on the server.  It makes no claims
-about the repository actually having LFS blobs committed to it.
-
-This capability was introduced by the LFS extension in Mercurial 4.5 (released
-Feb 2018).
-
-lfs-serve
----------
-
-Indicates that the LFS extension is enabled on the server, and LFS blobs are
-committed to the remote repository.  (Specifically, it indicates that the 'lfs'
-requirement is present in the remote repository.)
-
-This capability was introduced by the LFS extension in Mercurial 4.8 (released
-Nov 2018).
-
-lookup
-------
-
-Whether the server supports the ``lookup`` command.
-
-This capability was introduced in Mercurial 0.9.2 (released December
-2006).
-
-This capability was introduced at the same time as the ``changegroupsubset``
-capability/command.
-
-partial-pull
-------------
-
-Indicates that the client can deal with partial answers to pull requests
-by repeating the request.
-
-If this parameter is not advertised, the server will not send pull bundles.
-
-This client capability was introduced in Mercurial 4.6.
-
-protocaps
----------
-
-Whether the server supports the ``protocaps`` command for SSH V1 transport.
-
-This capability was introduced in Mercurial 4.6.
-
-pushkey
--------
-
-Whether the server supports the ``pushkey`` and ``listkeys`` commands.
-
-This capability was introduced in Mercurial 1.6 (released July 2010).
-
-standardbundle
---------------
-
-**Unsupported**
-
-This capability was introduced during the Mercurial 0.9.2 development cycle in
-2006. It was never present in a release, as it was replaced by the ``unbundle``
-capability. This capability should not be encountered in the wild.
-
-stream-preferred
-----------------
-
-If present, the server prefers that clients clone using the streaming clone
-protocol (``hg clone --stream``) rather than the standard
-changegroup/bundle based protocol.
-
-This capability was introduced in Mercurial 2.2 (released May 2012).
-
-streamreqs
-----------
-
-Indicates whether the server supports *streaming clones* and the *requirements*
-that clients must support to receive it.
-
-If present, the server supports the ``stream_out`` command, which transmits
-raw revlogs from the repository instead of changegroups. This provides a faster
-cloning mechanism at the expense of more bandwidth used.
-
-The value of this capability is a comma-delimited list of repo format
-*requirements*. These are requirements that impact the reading of data in
-the ``.hg/store`` directory. An example value is
-``streamreqs=generaldelta,revlogv1`` indicating the server repo requires
-the ``revlogv1`` and ``generaldelta`` requirements.
-
-If the only format requirement is ``revlogv1``, the server may expose the
-``stream`` capability instead of the ``streamreqs`` capability.
-
-This capability was introduced in Mercurial 1.7 (released November 2010).
-
-stream
-------
-
-Whether the server supports *streaming clones* from ``revlogv1`` repos.
-
-If present, the server supports the ``stream_out`` command, which transmits
-raw revlogs from the repository instead of changegroups. This provides a faster
-cloning mechanism at the expense of more bandwidth used.
-
-This capability was introduced in Mercurial 0.9.1 (released July 2006).
-
-When initially introduced, the value of the capability was the numeric
-revlog revision. e.g. ``stream=1``. This indicates the repository is using
-``revlogv1``. This simple integer value wasn't powerful enough, so the
-``streamreqs`` capability was invented to handle cases where the repo
-requirements have more than just ``revlogv1``. Newer servers omit the
-``=1`` since it was the only value supported and the value of ``1`` can
-be implied by clients.
-
-unbundlehash
-------------
-
-Whether the ``unbundle`` command supports receiving a hash of all the
-heads instead of a list.
-
-For more, see the documentation for the ``unbundle`` command.
-
-This capability was introduced in Mercurial 1.9 (released July 2011).
-
-unbundle
---------
-
-Whether the server supports pushing via the ``unbundle`` command.
-
-This capability/command has been present since Mercurial 0.9.1 (released
-July 2006).
-
-Mercurial 0.9.2 (released December 2006) added values to the capability
-indicating which bundle types the server supports receiving. This value is a
-comma-delimited list. e.g. ``HG10GZ,HG10BZ,HG10UN``. The order of values
-reflects the priority/preference of that type, where the first value is the
-most preferred type.
-
-Content Negotiation
-===================
-
-The wire protocol has some mechanisms to help peers determine what content
-types and encoding the other side will accept. Historically, these mechanisms
-have been built into commands themselves because most commands only send a
-well-defined response type and only certain commands needed to support
-functionality like compression.
-
-Currently, only the HTTP version 1 transport supports content negotiation
-at the protocol layer.
-
-HTTP requests advertise supported response formats via the ``X-HgProto-<N>``
-request header, where ``<N>`` is an integer starting at 1 allowing the logical
-value to span multiple headers. This value consists of a list of
-space-delimited parameters. Each parameter denotes a feature or capability.
-
-The following parameters are defined:
-
-0.1
-   Indicates the client supports receiving ``application/mercurial-0.1``
-   responses.
-
-0.2
-   Indicates the client supports receiving ``application/mercurial-0.2``
-   responses.
-
-cbor
-   Indicates the client supports receiving ``application/mercurial-cbor``
-   responses.
-
-   (Only intended to be used with version 2 transports.)
-
-comp
-   Indicates compression formats the client can decode. Value is a list of
-   comma delimited strings identifying compression formats ordered from
-   most preferential to least preferential. e.g. ``comp=zstd,zlib,none``.
-
-   This parameter does not have an effect if only the ``0.1`` parameter
-   is defined, as support for ``application/mercurial-0.2`` or greater is
-   required to use arbitrary compression formats.
-
-   If this parameter is not advertised, the server interprets this as
-   equivalent to ``zlib,none``.
-
-Clients may choose to only send this header if the ``httpmediatype``
-server capability is present, as currently all server-side features
-consulting this header require the client to opt in to new protocol features
-advertised via the ``httpmediatype`` capability.
-
-A server that doesn't receive an ``X-HgProto-<N>`` header should infer a
-value of ``0.1``. This is compatible with legacy clients.
-
-A server receiving a request indicating support for multiple media type
-versions may respond with any of the supported media types. Not all servers
-may support all media types on all commands.
-
-Commands
-========
-
-This section contains a list of all wire protocol commands implemented by
-the canonical Mercurial server.
-
-See :hg:`help internals.wireprotocolv2` for information on commands exposed
-to the frame-based protocol.
-
-batch
------
-
-Issue multiple commands while sending a single command request. The purpose
-of this command is to allow a client to issue multiple commands while avoiding
-multiple round trips to the server therefore enabling commands to complete
-quicker.
-
-The command accepts a ``cmds`` argument that contains a list of commands to
-execute.
-
-The value of ``cmds`` is a ``;`` delimited list of strings. Each string has the
-form ``<command> <arguments>``. That is, the command name followed by a space
-followed by an argument string.
-
-The argument string is a ``,`` delimited list of ``<key>=<value>`` values
-corresponding to command arguments. Both the argument name and value are
-escaped using a special substitution map::
-
-   : -> :c
-   , -> :o
-   ; -> :s
-   = -> :e
-
-The response type for this command is ``string``. The value contains a
-``;`` delimited list of responses for each requested command. Each value
-in this list is escaped using the same substitution map used for arguments.
-
-If an error occurs, the generic error response may be sent.
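-
-A sketch of the escaping (order matters: ``:`` must be escaped first
-and unescaped last so that escape sequences are not mangled)::
-
-    def escapebatcharg(plain):
-        return (plain.replace(':', ':c')
-                     .replace(',', ':o')
-                     .replace(';', ':s')
-                     .replace('=', ':e'))
-
-    def unescapebatcharg(escaped):
-        return (escaped.replace(':e', '=')
-                       .replace(':s', ';')
-                       .replace(':o', ',')
-                       .replace(':c', ':'))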
-
-between
--------
-
-(Legacy command used for discovery in old clients)
-
-Obtain nodes between pairs of nodes.
-
-The ``pairs`` argument contains a space-delimited list of ``-`` delimited
-hex node pairs. e.g.::
-
-   a072279d3f7fd3a4aa7ffa1a5af8efc573e1c896-6dc58916e7c070f678682bfe404d2e2d68291a18
-
-Return type is a ``string``. Value consists of lines corresponding to each
-requested range. Each line contains a space-delimited list of hex nodes.
-A newline ``\n`` terminates each line, including the last one.
-
-branchmap
----------
-
-Obtain heads in named branches.
-
-Accepts no arguments. Return type is a ``string``.
-
-Return value contains lines with URL encoded branch names followed by a space
-followed by a space-delimited list of hex nodes of heads on that branch.
-e.g.::
-
-    default a072279d3f7fd3a4aa7ffa1a5af8efc573e1c896 6dc58916e7c070f678682bfe404d2e2d68291a18
-    stable baae3bf31522f41dd5e6d7377d0edd8d1cf3fccc
-
-There is no trailing newline.
-
-branches
---------
-
-(Legacy command used for discovery in old clients. Clients with ``getbundle``
-use the ``known`` and ``heads`` commands instead.)
-
-Obtain ancestor changesets of specific nodes back to a branch point.
-
-Despite the name, this command has nothing to do with Mercurial named branches.
-Instead, it is related to DAG branches.
-
-The command accepts a ``nodes`` argument, which is a string of space-delimited
-hex nodes.
-
-For each node requested, the server will find the first ancestor node that is
-a DAG root or is a merge.
-
-Return type is a ``string``. Return value contains lines with result data for
-each requested node. Each line contains space-delimited nodes followed by a
-newline (``\n``). The 4 nodes reported on each line correspond to the requested
-node, the ancestor node found, and its 2 parent nodes (which may be the null
-node).
-
-capabilities
-------------
-
-Obtain the capabilities string for the repo.
-
-Unlike the ``hello`` command, the capabilities string is not prefixed.
-There is no trailing newline.
-
-This command does not accept any arguments. Return type is a ``string``.
-
-This command was introduced in Mercurial 0.9.1 (released July 2006).
-
-changegroup
------------
-
-(Legacy command: use ``getbundle`` instead)
-
-Obtain a changegroup version 1 with data for changesets that are
-descendants of client-specified changesets.
-
-The ``roots`` argument contains a list of space-delimited hex nodes.
-
-The server responds with a changegroup version 1 containing all
-changesets between the requested root/base nodes and the repo's head nodes
-at the time of the request.
-
-The return type is a ``stream``.
-
-changegroupsubset
------------------
-
-(Legacy command: use ``getbundle`` instead)
-
-Obtain a changegroup version 1 with data for changesets between
-client-specified base and head nodes.
-
-The ``bases`` argument contains a list of space-delimited hex nodes.
-The ``heads`` argument contains a list of space-delimited hex nodes.
-
-The server responds with a changegroup version 1 containing all
-changesets between the requested base and head nodes at the time of the
-request.
-
-The return type is a ``stream``.
-
-clonebundles
-------------
-
-Obtains a manifest of bundle URLs available to seed clones.
-
-Each returned line contains a URL followed by metadata. See the
-documentation in the ``clonebundles`` extension for more.
-
-The return type is a ``string``.
-
-getbundle
----------
-
-Obtain a bundle containing repository data.
-
-This command accepts the following arguments:
-
-heads
-   List of space-delimited hex nodes of heads to retrieve.
-common
-   List of space-delimited hex nodes that the client has in common with the
-   server.
-obsmarkers
-   Boolean indicating whether to include obsolescence markers as part
-   of the response. Only works with bundle2.
-bundlecaps
-   Comma-delimited set of strings defining client bundle capabilities.
-listkeys
-   Comma-delimited list of strings of ``pushkey`` namespaces. For each
-   namespace listed, a bundle2 part will be included with the content of
-   that namespace.
-cg
-   Boolean indicating whether changegroup data is requested.
-cbattempted
-   Boolean indicating whether the client attempted to use the *clone bundles*
-   feature before performing this request.
-bookmarks
-   Boolean indicating whether bookmark data is requested.
-phases
-   Boolean indicating whether phases data is requested.
-
-The return type on success is a ``stream`` where the value is a bundle.
-On the HTTP version 1 transport, the response is zlib compressed.
-
-If an error occurs, a generic error response can be sent.
-
-Unless the client sends a false value for the ``cg`` argument, the returned
-bundle contains a changegroup with the nodes between the specified ``common``
-and ``heads`` nodes. Depending on the command arguments, the type and content
-of the returned bundle can vary significantly.
-
-The default behavior is for the server to send a raw changegroup version
-``01`` response.
-
-If the ``bundlecaps`` provided by the client contain a value beginning
-with ``HG2``, a bundle2 will be returned. The bundle2 data may contain
-additional repository data, such as ``pushkey`` namespace values.
-
-heads
------
-
-Returns a list of space-delimited hex nodes of repository heads followed
-by a newline. e.g.
-``a9eeb3adc7ddb5006c088e9eda61791c777cbf7c 31f91a3da534dc849f0d6bfc00a395a97cf218a1\n``
-
-This command does not accept any arguments. The return type is a ``string``.
-
-hello
------
-
-Returns lines describing interesting things about the server in an RFC-822
-like format.
-
-Currently, the only line defines the server capabilities. It has the form::
-
-    capabilities: <value>
-
-See above for more about the capabilities string.
-
-SSH clients typically issue this command as soon as a connection is
-established.
-
-This command does not accept any arguments. The return type is a ``string``.
-
-This command was introduced in Mercurial 0.9.1 (released July 2006).
-
-listkeys
---------
-
-List values in a specified ``pushkey`` namespace.
-
-The ``namespace`` argument defines the pushkey namespace to operate on.
-
-The return type is a ``string``. The value is an encoded dictionary of keys.
-
-Key-value pairs are delimited by newlines (``\n``). Within each line, keys and
-values are separated by a tab (``\t``). Keys and values are both strings.
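-
-Decoding this format is straightforward (illustrative sketch)::
-
-    def decodelistkeys(data):
-        # One key-value pair per line; key and value separated by
-        # a tab.
-        result = {}
-        for line in data.splitlines():
-            key, value = line.split(b'\t', 1)
-            result[key] = value
-        return result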
-
-lookup
-------
-
-Try to resolve a value to a known repository revision.
-
-The ``key`` argument is converted from bytes to an
-``encoding.localstr`` instance then passed into
-``localrepository.__getitem__`` in an attempt to resolve it.
-
-The return type is a ``string``.
-
-Upon successful resolution, returns ``1 <hex node>\n``. On failure,
-returns ``0 <error string>\n``. e.g.::
-
-   1 273ce12ad8f155317b2c078ec75a4eba507f1fba\n
-
-   0 unknown revision 'foo'\n
-
-known
------
-
-Determine whether multiple nodes are known.
-
-The ``nodes`` argument is a list of space-delimited hex nodes to check
-for existence.
-
-The return type is ``string``.
-
-Returns a string consisting of ``0``s and ``1``s indicating whether nodes
-are known. If the Nth node specified in the ``nodes`` argument is known,
-a ``1`` will be returned at byte offset N. If the node isn't known, ``0``
-will be present at byte offset N.
-
-There is no trailing newline.
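-
-For illustration, mapping the response back onto the requested
-nodes::
-
-    def decodeknown(nodes, response):
-        # Byte offset N holds 1 if the Nth requested node is known
-        # to the server, 0 otherwise.
-        return {node: response[i:i + 1] == b'1'
-                for i, node in enumerate(nodes)}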
-
-protocaps
----------
-
-Notify the server about the client capabilities in the SSH V1 transport
-protocol.
-
-The ``caps`` argument is a space-delimited list of capabilities.
-
-The server will reply with the string ``OK``.
-
-pushkey
--------
-
-Set a value using the ``pushkey`` protocol.
-
-Accepts arguments ``namespace``, ``key``, ``old``, and ``new``, which
-correspond to the pushkey namespace to operate on, the key within that
-namespace to change, the old value (which may be empty), and the new value.
-All arguments are string types.
-
-The return type is a ``string``. The value depends on the transport protocol.
-
-The SSH version 1 transport sends a string encoded integer followed by a
-newline (``\n``) which indicates operation result. The server may send
-additional output on the ``stderr`` stream that should be displayed to the
-user.
-
-The HTTP version 1 transport sends a string encoded integer followed by a
-newline followed by additional server output that should be displayed to
-the user. This may include output from hooks, etc.
-
-The integer result varies by namespace. ``0`` means an error has occurred
-and there should be additional output to display to the user.
-
-stream_out
-----------
-
-Obtain *streaming clone* data.
-
-The return type is either a ``string`` or a ``stream``, depending on
-whether the request was fulfilled properly.
-
-A return value of ``1\n`` indicates the server is not configured to serve
-this data. If this is seen by the client, they may not have verified the
-``stream`` capability is set before making the request.
-
-A return value of ``2\n`` indicates the server was unable to lock the
-repository to generate data.
-
-All other responses are a ``stream`` of bytes. The first line of this data
-contains 2 space-delimited integers corresponding to the path count and
-payload size, respectively::
-
-    <path count> <payload size>\n
-
-The ``<payload size>`` is the total size of path data: it does not include
-the size of the per-path header lines.
-
-Following that header are ``<path count>`` entries. Each entry consists of a
-line with metadata followed by raw revlog data. The line consists of::
-
-    <store path>\0<size>\n
-
-The ``<store path>`` is the encoded store path of the data that follows.
-``<size>`` is the amount of data for this store path/revlog that follows the
-newline.
-
-There is no trailer to indicate end of data. Instead, the client should stop
-reading after ``<path count>`` entries are consumed.
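-
-A parsing sketch for this framing (the file-like object ``fh`` and
-the helper names are illustrative)::
-
-    def parsestreamheader(fh):
-        # "<path count> <payload size>\n"
-        filecount, bytecount = map(int, fh.readline().split(b' ', 1))
-        return filecount, bytecount
-
-    def parseentryheader(fh):
-        # "<store path>\0<size>\n"
-        name, size = fh.readline().rstrip(b'\n').split(b'\0', 1)
-        return name, int(size)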
-
-unbundle
---------
-
-Send a bundle containing data (usually changegroup data) to the server.
-
-Accepts the argument ``heads``, which is a space-delimited list of hex nodes
-corresponding to server repository heads observed by the client. This is used
-to detect race conditions and abort push operations before a server performs
-too much work or a client transfers too much data.
-
-The request payload consists of a bundle to be applied to the repository,
-much as if :hg:`unbundle` were called.
-
-In most scenarios, a special ``push response`` type is returned. This type
-contains an integer describing the change in heads as a result of the
-operation. A value of ``0`` indicates nothing changed. ``1`` means the number
-of heads remained the same. Values ``2`` and larger indicate the number of
-added heads minus 1. e.g. ``3`` means 2 heads were added. Negative values
-indicate the number of fewer heads, also off by 1. e.g. ``-2`` means there
-is 1 fewer head.
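-
-A decoding sketch for the integer result (illustrative)::
-
-    def headschange(result):
-        # Positive values are off by one: 1 means no heads were
-        # added, 3 means 2 heads were added. Negative values are
-        # off by one in the other direction.
-        if result > 0:
-            return result - 1
-        if result < 0:
-            return result + 1
-        return None  # 0: nothing changed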
-
-The encoding of the ``push response`` type varies by transport.
-
-For the SSH version 1 transport, this type is composed of 2 ``string``
-responses: an empty response (``0\n``) followed by the integer result value.
-e.g. ``1\n2``. So the full response might be ``0\n1\n2``.
-
-For the HTTP version 1 transport, the response is a ``string`` type composed
-of an integer result value followed by a newline (``\n``) followed by string
-content holding server output that should be displayed on the client (output
-hooks, etc).
-
-In some cases, the server may respond with a ``bundle2`` bundle. In this
-case, the response type is ``stream``. For the HTTP version 1 transport, the
-response is zlib compressed.
-
-The server may also respond with a generic error type, which contains a string
-indicating the failure.
--- a/mercurial/help/internals/wireprotocolrpc.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,740 +0,0 @@
-**Experimental and under development**
-
-This document describes Mercurial's transport-agnostic remote procedure
-call (RPC) protocol, which is used to perform interactions with remote
-servers. This protocol is also referred to as ``hgrpc``.
-
-The protocol has the following high-level features:
-
-* Concurrent request and response support (multiple commands can be issued
-  simultaneously and responses can be streamed simultaneously).
-* Supports half-duplex and full-duplex connections.
-* All data is transmitted within *frames*, which have a well-defined
-  header and encode their length.
-* Side-channels for sending progress updates and printing output. Text
-  output from the remote can be localized locally.
-* Support for simultaneous and long-lived compression streams, even across
-  requests.
-* Uses CBOR for data exchange.
-
-The protocol is not specific to Mercurial and could be used by other
-applications.
-
-High-level Overview
-===================
-
-To operate the protocol, a bi-directional, half-duplex pipe supporting
-ordered sends and receives is required. That is, each peer has one pipe
-for sending data and another for receiving. Full-duplex pipes are also
-supported.
-
-All data is read and written in atomic units called *frames*. These
-are conceptually similar to TCP packets. Higher-level functionality
-is built on the exchange and processing of frames.
-
-All frames are associated with a *stream*. A *stream* provides a
-unidirectional grouping of frames. Streams facilitate two goals:
-content encoding and parallelism. There is a dedicated section on
-streams below.
-
-The protocol is request-response based: the client issues requests to
-the server, which issues replies to those requests. Server-initiated
-messaging is not currently supported, but this specification carves
-out room to implement it.
-
-All frames are associated with a numbered request. Frames can thus
-be logically grouped by their request ID.
-
-Frames
-======
-
-Frames begin with an 8 octet header followed by a variable length
-payload::
-
-    +------------------------------------------------+
-    |                 Length (24)                    |
-    +--------------------------------+---------------+
-    |         Request ID (16)        | Stream ID (8) |
-    +------------------+-------------+---------------+
-    | Stream Flags (8) |
-    +-----------+------+
-    | Type (4)  |
-    +-----------+
-    | Flags (4) |
-    +===========+===================================================|
-    |                     Frame Payload (0...)                    ...
-    +---------------------------------------------------------------+
-
-The length of the frame payload is expressed as an unsigned 24 bit
-little endian integer. Values larger than 65535 MUST NOT be used unless
-given permission by the server as part of the negotiated capabilities
-during the handshake. The frame header is not part of the advertised
-frame length. The payload length is the over-the-wire length. If there
-is content encoding applied to the payload as part of the frame's stream,
-the length is the output of that content encoding, not the input.
-
-The 16-bit ``Request ID`` field denotes the integer request identifier,
-stored as an unsigned little endian integer. Odd numbered requests are
-client-initiated. Even numbered requests are server-initiated. This
-refers to where the *request* was initiated - not where the *frame* was
-initiated, so servers will send frames with odd ``Request ID`` in
-response to client-initiated requests. Implementations are advised to
-start ordering request identifiers at ``1`` and ``0``, increment by
-``2``, and wrap around if all available numbers have been exhausted.
-
-The 8-bit ``Stream ID`` field denotes the stream that the frame is
-associated with. Frames belonging to a stream may have content
-encoding applied and the receiver may need to decode the raw frame
-payload to obtain the original data. Odd numbered IDs are
-client-initiated. Even numbered IDs are server-initiated.
-
-The 8-bit ``Stream Flags`` field defines stream processing semantics.
-See the section on streams below.
-
-The 4-bit ``Type`` field denotes the type of frame being sent.
-
-The 4-bit ``Flags`` field defines special, per-type attributes for
-the frame.
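-
-A header parsing sketch (we assume here that the frame type occupies
-the high nibble of the final octet)::
-
-    import struct
-
-    def parseframeheader(header):
-        assert len(header) == 8
-        # 24-bit little endian payload length.
-        length = header[0] | header[1] << 8 | header[2] << 16
-        requestid, streamid, streamflags, typeflags = (
-            struct.unpack_from('<HBBB', header, 3))
-        frametype = typeflags >> 4  # assumed high nibble
-        frameflags = typeflags & 0x0f
-        return (length, requestid, streamid, streamflags,
-                frametype, frameflags)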
-
-The sections below define the frame types and their behavior.
-
-Command Request (``0x01``)
---------------------------
-
-This frame contains a request to run a command.
-
-The payload consists of a CBOR map defining the command request. The
-bytestring keys of that map are:
-
-name
-   Name of the command that should be executed (bytestring).
-args
-   Map of bytestring keys to various value types containing the named
-   arguments to this command.
-
-   Each command defines its own set of argument names and their expected
-   types.
-
-redirect (optional)
-   (map) Advertises client support for following response *redirects*.
-
-   This map has the following bytestring keys:
-
-   targets
-      (array of bytestring) List of named redirect targets supported by
-      this client. The names come from the targets advertised by the
-      server's *capabilities* message.
-
-   hashes
-      (array of bytestring) List of preferred hashing algorithms that can
-      be used for content integrity verification.
-
-   See the *Content Redirects* section below for more on content redirects.
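-
-Building such a payload with a generic CBOR library (``cbor2`` is a
-third-party stand-in here; Mercurial itself uses an internal CBOR
-implementation)::
-
-    import cbor2
-
-    def makecommandrequest(name, args, redirect=None):
-        request = {b'name': name, b'args': args}
-        if redirect is not None:
-            request[b'redirect'] = redirect
-        return cbor2.dumps(request)
-
-    payload = makecommandrequest(b'heads', {})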
-
-This frame type MUST ONLY be sent from clients to servers: it is illegal
-for a server to send this frame to a client.
-
-The following flag values are defined for this type:
-
-0x01
-   New command request. When set, this frame represents the beginning
-   of a new request to run a command. The ``Request ID`` attached to this
-   frame MUST NOT be active.
-0x02
-   Command request continuation. When set, this frame is a continuation
-   from a previous command request frame for its ``Request ID``. This
-   flag is set when the CBOR data for a command request does not fit
-   in a single frame.
-0x04
-   Additional frames expected. When set, the command request didn't fit
-   into a single frame and additional CBOR data follows in a subsequent
-   frame.
-0x08
-   Command data frames expected. When set, command data frames are
-   expected to follow the final command request frame for this request.
-
-``0x01`` MUST be set on the initial command request frame for a
-``Request ID``.
-
-``0x01`` or ``0x02`` MUST be set to indicate this frame's role in
-a series of command request frames.
-
-If command data frames are to be sent, ``0x08`` MUST be set on ALL
-command request frames.
-
-Command Data (``0x02``)
------------------------
-
-This frame contains raw data for a command.
-
-Most commands can be executed by specifying arguments. However,
-arguments have an upper bound to their length. Commands that accept
-data beyond this length, or whose length isn't known when the command
-is initially sent, need to stream arbitrary data to the server. This
-frame type facilitates the sending of this data.
-
-The payload of this frame type consists of a stream of raw data to be
-consumed by the command handler on the server. The format of the data
-is command specific.
-
-The following flag values are defined for this type:
-
-0x01
-   Command data continuation. When set, the data for this command
-   continues into a subsequent frame.
-
-0x02
-   End of data. When set, command data has been fully sent to the
-   server. The command has been fully issued and no new data for this
-   command will be sent. The next frame will belong to a new command.
-
-Command Response Data (``0x03``)
---------------------------------
-
-This frame contains response data to an issued command.
-
-Response data ALWAYS consists of a series of 1 or more CBOR encoded
-values. A CBOR value may use indefinite length encoding, and the
-bytes constituting the value may span several frames.
-
-The following flag values are defined for this type:
-
-0x01
-   Data continuation. When set, an additional frame containing response data
-   will follow.
-0x02
-   End of data. When set, the response data has been fully sent and
-   no additional frames for this response will be sent.
-
-The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
-
-Error Occurred (``0x05``)
--------------------------
-
-Some kind of error occurred.
-
-There are 3 general kinds of failures that can occur:
-
-* Command error encountered before any response issued
-* Command error encountered after a response was issued
-* Protocol or stream level error
-
-This frame type is used to capture the latter cases. (The general
-command error case is handled by the leading CBOR map in
-``Command Response`` frames.)
-
-The payload of this frame contains a CBOR map detailing the error. That
-map has the following bytestring keys:
-
-type
-   (bytestring) The overall type of error encountered. Can be one of the
-   following values:
-
-   protocol
-      A protocol-level error occurred. This typically means someone
-      is violating the framing protocol semantics and the server is
-      refusing to proceed.
-
-   server
-      A server-level error occurred. This typically indicates some kind of
-      logic error on the server, likely the fault of the server.
-
-   command
-      A command-level error, likely the fault of the client.
-
-message
-   (array of maps) A richly formatted message that is intended for
-   human consumption. See the ``Human Output Side-Channel`` frame
-   section for a description of the format of this data structure.
-
-Human Output Side-Channel (``0x06``)
-------------------------------------
-
-This frame contains a message that is intended to be displayed to
-people. Whereas most frames communicate machine readable data, this
-frame communicates textual data that is intended to be shown to
-humans.
-
-The frame consists of a series of *formatting requests*. Each formatting
-request consists of a formatting string, arguments for that formatting
-string, and labels to apply to that formatting string.
-
-A formatting string is a printf()-like string that allows variable
-substitution within the string. Labels allow the rendered text to be
-*decorated*. Assuming use of the canonical Mercurial code base, a
-formatting string can be the input to the ``i18n._`` function. This
-allows messages emitted from the server to be localized. So even if
-the server has different i18n settings, people could see messages in
-their *native* settings. Similarly, the use of labels allows
-decorations like coloring and underlining to be applied using the
-client's configured rendering settings.
-
-Formatting strings are similar to ``printf()`` strings or how
-Python's ``%`` operator works. The only supported formatting sequences
-are ``%s`` and ``%%``. ``%s`` will be replaced by whatever the string
-at that position resolves to. ``%%`` will be replaced by ``%``. All
-other 2-byte sequences beginning with ``%`` represent a literal
-``%`` followed by that character. However, future versions of the
-wire protocol reserve the right to allow clients to opt in to receiving
-formatting strings with additional formatters, hence why ``%%`` is
-required to represent the literal ``%``.
-
-The frame payload consists of a CBOR array of CBOR maps. Each map
-defines an *atom* of text data to print. Each *atom* has the following
-bytestring keys:
-
-msg
-   (bytestring) The formatting string. Content MUST be ASCII.
-args (optional)
-   Array of bytestrings defining arguments to the formatting string.
-labels (optional)
-   Array of bytestrings defining labels to apply to this atom.
-
-All data to be printed MUST be encoded into a single frame: this frame
-does not support spanning data across multiple frames.
-
-All textual data encoded in these frames is assumed to be line delimited.
-The last atom in the frame SHOULD end with a newline (``\n``). If it
-doesn't, clients MAY add a newline to facilitate immediate printing.
-
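-A minimal sketch of rendering a single atom under these rules (the
-function name is hypothetical; labels, which only control decoration,
-are ignored here)::
-
-  def renderatom(atom):
-      msg = atom[b'msg'].decode('ascii')
-      args = [a.decode('utf-8') for a in atom.get(b'args', [])]
-      out = []
-      i = 0
-      while i < len(msg):
-          if msg[i] == '%' and i + 1 < len(msg):
-              nxt = msg[i + 1]
-              if nxt == 's':
-                  out.append(args.pop(0))  # substitute next argument
-              elif nxt == '%':
-                  out.append('%')
-              else:
-                  # Other %-sequences are literal per the rules above.
-                  out.append('%' + nxt)
-              i += 2
-          else:
-              out.append(msg[i])
-              i += 1
-      return ''.join(out)
-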
-Progress Update (``0x07``)
---------------------------
-
-This frame holds the progress of an operation on the peer. Consumption
-of these frames allows clients to display progress bars, estimated
-completion times, etc.
-
-Each frame defines the progress of a single operation on the peer. The
-payload consists of a CBOR map with the following bytestring keys:
-
-topic
-   Topic name (string)
-pos
-   Current numeric position within the topic (integer)
-total
-   Total/end numeric position of this topic (unsigned integer)
-label (optional)
-   Unit label (string)
-item (optional)
-   Item name (string)
-
-Progress state is created when a frame is received referencing a
-*topic* that isn't currently tracked. Progress tracking for that
-*topic* is finished when a frame is received reporting the current
-position of that topic as ``-1``.
-
-Multiple *topics* may be active at any given time.
-
-Rendering of progress information is not mandated or governed by this
-specification: implementations MAY render progress information however
-they see fit, including not at all.
-
-The string data describing the topic SHOULD be static strings to
-facilitate receivers localizing that string data. The emitter
-MUST normalize all string data to valid UTF-8 and receivers SHOULD
-validate that received data conforms to UTF-8. The topic name
-SHOULD be ASCII.
-
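-A receiver might maintain progress state with a sketch like this
-(the helper name is hypothetical; rendering is left out)::
-
-  progress = {}  # topic -> most recent progress map
-
-  def onprogressframe(payload):
-      topic = payload[b'topic']
-      if payload[b'pos'] == -1:
-          # A position of -1 finishes tracking for this topic.
-          progress.pop(topic, None)
-      else:
-          progress[topic] = payload
-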
-Sender Protocol Settings (``0x08``)
------------------------------------
-
-This frame type advertises the sender's support for various protocol and
-stream level features. The data advertised in this frame is used to influence
-subsequent behavior of the current frame exchange channel.
-
-The frame payload consists of a CBOR map. It may contain the following
-bytestring keys:
-
-contentencodings
-   (array of bytestring) A list of content encodings supported by the
-   sender, in order of most to least preferred.
-
-   Peers are allowed to encode stream data using any of the listed
-   encodings.
-
-   See the ``Content Encoding Profiles`` section for an enumeration
-   of supported content encodings.
-
-   If not defined, the value is assumed to be a list with the single value
-   ``identity``, meaning only the no-op encoding is supported.
-
-   Senders MAY filter the set of advertised encodings against what they
-   know the receiver supports (e.g. if the receiver advertised encodings
-   via the capabilities descriptor). However, doing so will prevent
-   servers from gaining an understanding of the aggregate capabilities
-   of clients, so clients are discouraged from doing so.
-
-When this frame is not sent/received, the receiver assumes default values
-for all keys.
-
-If encountered, this frame type MUST be sent before any other frame type
-in a channel.
-
-The following flag values are defined for this frame type:
-
-0x01
-   Data continuation. When set, an additional frame containing more protocol
-   settings immediately follows.
-0x02
-   End of data. When set, the protocol settings data has been completely
-   sent.
-
-The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
-
-Stream Encoding Settings (``0x09``)
------------------------------------
-
-This frame type holds information defining the content encoding
-settings for a *stream*.
-
-This frame type is likely consumed by the protocol layer and is not
-passed on to applications.
-
-This frame type MUST ONLY occur on frames having the *Beginning of Stream*
-``Stream Flag`` set.
-
-The payload of this frame defines what content encoding has (possibly)
-been applied to the payloads of subsequent frames in this stream.
-
-The payload consists of a series of CBOR values. The first value is a
-bytestring denoting the content encoding profile of the data in this
-stream. Subsequent CBOR values supplement this simple value in a
-profile-specific manner. See the ``Content Encoding Profiles`` section
-for more.
-
-In the absence of this frame on a stream, it is assumed the stream is
-using the ``identity`` content encoding.
-
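-For illustration, a payload selecting the ``zstd-8mb`` profile (see
-``Content Encoding Profiles`` below) is just a concatenation of CBOR
-values, here produced with the third-party ``cbor2`` library::
-
-  import cbor2
-
-  # First value: the profile name. Profile-specific supplementary
-  # values, if any, would be appended after it.
-  payload = cbor2.dumps(b'zstd-8mb')
-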
-The following flag values are defined for this frame type:
-
-0x01
-   Data continuation. When set, an additional frame containing more encoding
-   settings immediately follows.
-0x02
-   End of data. When set, the encoding settings data has been completely
-   sent.
-
-The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
-
-Stream States and Flags
-=======================
-
-Streams can be in two states: *open* and *closed*. An *open* stream
-is active and frames attached to that stream could arrive at any time.
-A *closed* stream is not active. If a frame attached to a *closed*
-stream arrives, that frame MUST have an appropriate stream flag
-set indicating beginning of stream. All streams are in the *closed*
-state by default.
-
-The ``Stream Flags`` field denotes a set of bit flags for defining
-the relationship of this frame within a stream. The following flags
-are defined:
-
-0x01
-   Beginning of stream. The first frame in the stream MUST set this
-   flag. When received, the ``Stream ID`` this frame is attached to
-   becomes ``open``.
-
-0x02
-   End of stream. The last frame in a stream MUST set this flag. When
-   received, the ``Stream ID`` this frame is attached to becomes
-   ``closed``. Any content encoding context associated with this stream
-   can be destroyed after processing the payload of this frame.
-
-0x04
-   Apply content encoding. When set, any content encoding settings
-   defined by the stream should be applied when attempting to read
-   the frame. When not set, the frame payload isn't encoded.
-
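-A sketch of how a receiver might track stream state from these flags
-(constant and function names are hypothetical)::
-
-  FLAG_BEGIN_STREAM = 0x01
-  FLAG_END_STREAM = 0x02
-  FLAG_ENCODED = 0x04
-
-  openstreams = set()
-
-  def onframe(streamid, streamflags):
-      if streamflags & FLAG_BEGIN_STREAM:
-          openstreams.add(streamid)
-      elif streamid not in openstreams:
-          raise ValueError('frame received for closed stream')
-      if streamflags & FLAG_END_STREAM:
-          # Any encoding context for this stream can be destroyed
-          # once this frame's payload has been processed.
-          openstreams.remove(streamid)
-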
-TODO consider making stream opening and closing communicated via
-explicit frame types (e.g. a "stream state change" frame) rather than
-flags on all frames. This would make stream state changes more explicit,
-as they could only occur on specific frame types.
-
-Streams
-=======
-
-Streams - along with ``Request IDs`` - facilitate grouping of frames.
-But the purpose of each is quite different and the groupings they
-constitute are independent.
-
-A ``Request ID`` is essentially a tag. It tells you which logical
-request a frame is associated with.
-
-A *stream* is a sequence of frames grouped for the express purpose
-of applying a stateful encoding or for denoting sub-groups of frames.
-
-Unlike ``Request ID``s which span the request and response, a stream
-is unidirectional and stream IDs are independent from client to
-server.
-
-There is no strict hierarchical relationship between ``Request IDs``
-and *streams*. A stream can contain frames having multiple
-``Request IDs``. Frames belonging to the same ``Request ID`` can
-span multiple streams.
-
-One goal of streams is to facilitate content encoding. A stream can
-define an encoding to be applied to frame payloads. For example, the
-payload transmitted over the wire may contain output from a
-zstandard compression operation and the receiving end may decompress
-that payload to obtain the original data.
-
-The other goal of streams is to facilitate concurrent execution. For
-example, a server could spawn 4 threads to service a request that can
-be easily parallelized. Each of those 4 threads could write into its
-own stream. Those streams could then in turn be delivered to 4 threads
-on the receiving end, with each thread consuming its stream in near
-isolation. The *main* thread on both ends merely does I/O and
-encodes/decodes frame headers: the bulk of the work is done by worker
-threads.
-
-In addition, since content encoding is defined per stream, each
-*worker thread* could perform potentially CPU bound work concurrently
-with other threads. This approach of applying encoding at the
-sub-protocol / stream level eliminates a potential resource constraint
-on the protocol stream as a whole (it is common for the throughput of
-a compression engine to be smaller than the throughput of a network).
-
-Having multiple streams - each with their own encoding settings - also
-facilitates the use of advanced data compression techniques. For
-example, a transmitter could see that it is generating data faster
-or slower than the receiving end is consuming it and adjust its
-compression settings to trade CPU for compression ratio accordingly.
-
-While streams can define a content encoding, not all frames within
-that stream must use that content encoding. This can be useful when
-data is being served from caches and being derived dynamically. A
-cache could hold pre-compressed data so the server doesn't have to
-recompress it. The ability to pick and choose which frames are
-compressed allows servers to easily send data to the wire without
-involving potentially expensive encoding overhead.
-
-Content Encoding Profiles
-=========================
-
-Streams can have named content encoding *profiles* associated with
-them. A profile defines a shared understanding of content encoding
-settings and behavior.
-
-Profiles are described in the following sections.
-
-identity
---------
-
-The ``identity`` profile is a no-op encoding: the encoded bytes are
-exactly the input bytes.
-
-This profile MUST be supported by all peers.
-
-In the absence of an identified profile, the ``identity`` profile is
-assumed.
-
-zstd-8mb
---------
-
-Zstandard encoding (RFC 8478). Zstandard is a fast and effective lossless
-compression format.
-
-This profile allows decompressor window sizes of up to 8 MB.
-
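-With the ``zstandard`` Python package, a receiver could enforce this
-window limit roughly as follows (a sketch, not Mercurial's actual
-decoder)::
-
-  import zstandard
-
-  # Refuse payloads requiring a decompression window above 8 MB.
-  dctx = zstandard.ZstdDecompressor(max_window_size=8 * 1024 * 1024)
-  dobj = dctx.decompressobj()
-  # Feed each encoded frame payload to dobj.decompress(...).
-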
-zlib
-----
-
-zlib compressed data (RFC 1950). zlib is a widely-used and supported
-lossless compression format.
-
-It isn't as fast as zstandard and it is recommended to use zstandard instead,
-if possible.
-
-Command Protocol
-================
-
-A client can request that a remote run a command by sending it
-frames defining that command. This logical stream is composed of
-1 or more ``Command Request`` frames and 0 or more ``Command Data``
-frames.
-
-All frames composing a single command request MUST be associated with
-the same ``Request ID``.
-
-Clients MAY send additional command requests without waiting on the
-response to a previous command request. If they do so, they MUST ensure
-that the ``Request ID`` field of outbound frames does not conflict
-with that of an active ``Request ID`` whose response has not yet been
-fully received.
-
-Servers MAY respond to commands in a different order than they were
-sent over the wire. Clients MUST be prepared to deal with this. Servers
-also MAY start executing commands in a different order than they were
-received, or MAY execute multiple commands concurrently.
-
-If there is a dependency between commands or a race condition between
-commands executing (e.g. a read-only command that depends on the results
-of a command that mutates the repository), then clients MUST NOT send
-frames issuing a command until a response to all dependent commands has
-been received.
-TODO think about whether we should express dependencies between commands
-to avoid roundtrip latency.
-
-A command is defined by a command name, 0 or more command arguments,
-and optional command data.
-
-Arguments are the recommended mechanism for transferring fixed sets of
-parameters to a command. Data is appropriate for transferring variable
-data. Thinking in terms of HTTP, arguments would be headers and data
-would be the message body.
-
-It is recommended for servers to delay the dispatch of a command
-until all arguments have been received. Servers MAY impose limits on the
-maximum argument size.
-TODO define failure mechanism.
-
-Servers MAY dispatch commands immediately once argument data
-is available or delay until command data is received in full.
-
-Once a ``Command Request`` frame is sent, a client MUST be prepared to
-receive any of the following frames associated with that request:
-``Command Response``, ``Error Response``, ``Human Output Side-Channel``,
-``Progress Update``.
-
-The *main* response for a command will be in ``Command Response`` frames.
-The payloads of these frames consist of 1 or more CBOR encoded values.
-The first CBOR value on the first ``Command Response`` frame is special
-and denotes the overall status of the command. This CBOR map contains
-the following bytestring keys:
-
-status
-   (bytestring) A well-defined message containing the overall status of
-   this command request. The following values are defined:
-
-   ok
-      The command was received successfully and its response follows.
-   error
-      There was an error processing the command. More details about the
-      error are encoded in the ``error`` key.
-   redirect
-      The response for this command is available elsewhere. Details on
-      where are in the ``location`` key.
-
-error (optional)
-   A map containing information about an encountered error. The map has the
-   following keys:
-
-   message
-      (array of maps) A message describing the error. The message uses the
-      same format as those in the ``Human Output Side-Channel`` frame.
-
-location (optional)
-   (map) Presence indicates that a *content redirect* has occurred. The map
-   provides the external location of the content.
-
-   This map contains the following bytestring keys:
-
-   url
-      (bytestring) URL from which this content may be requested.
-
-   mediatype
-      (bytestring) The media type for the fetched content. e.g.
-      ``application/mercurial-*``.
-
-      In some transports, this value is also advertised by the transport.
-      e.g. as the ``Content-Type`` HTTP header.
-
-   size (optional)
-      (unsigned integer) Total size of remote object in bytes. This is
-      the raw size of the entity that will be fetched, minus any
-      non-Mercurial protocol encoding (e.g. HTTP content or transfer
-      encoding.)
-
-   fullhashes (optional)
-      (array of arrays) Content hashes for the entire payload. Each entry
-      is an array of bytestrings containing the hash name and the hash value.
-
-   fullhashseed (optional)
-      (bytestring) Optional seed value to feed into hasher for full content
-      hash verification.
-
-   serverdercerts (optional)
-      (array of bytestring) DER encoded x509 certificates for the server. When
-      defined, clients MAY validate that the x509 certificate on the target
-      server exactly matches the certificate used here.
-
-   servercadercerts (optional)
-      (array of bytestring) DER encoded x509 certificates for the certificate
-      authority of the target server. When defined, clients MAY validate that
-      the x509 certificate on the target server was signed by a CA
-      certificate in this set.
-
-   # TODO support for giving client an x509 certificate pair to be used as a
-   # client certificate.
-
-   # TODO support common authentication mechanisms (e.g. HTTP basic/digest
-   # auth).
-
-   # TODO support custom authentication mechanisms. This likely requires
-   # server to advertise required auth mechanism so client can filter.
-
-   # TODO support chained hashes. e.g. hash for each 1MB segment so client
-   # can iteratively validate data without having to consume all of it first.
-
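-For illustration, the initial CBOR map for each of the three statuses
-might look like the following (Python literal syntax; all values are
-examples only)::
-
-  {b'status': b'ok'}
-
-  {b'status': b'error',
-   b'error': {b'message': [{b'msg': b'unknown command\n'}]}}
-
-  {b'status': b'redirect',
-   b'location': {b'url': b'https://example.com/content',
-                 b'mediatype': b'application/mercurial-cbor'}}
-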
-TODO formalize when error frames can be seen and how errors can be
-recognized midway through a command response.
-
-Content Redirects
-=================
-
-Servers have the ability to respond to ANY command request with a
-*redirect* to another location. Such a response is referred to as a *redirect
-response*. (This feature is conceptually similar to HTTP redirects, but is
-more powerful.)
-
-A *redirect response* MUST ONLY be issued if the client advertises support
-for a redirect *target*.
-
-Clients advertise support for *redirect responses* after looking at the server's
-*capabilities* data, which is fetched during initial server connection
-handshake. The server's capabilities data advertises named *targets* for
-potential redirects.
-
-Each target is described by a protocol name, connection and protocol features,
-etc. The server also advertises target-agnostic redirect settings, such as
-which hash algorithms are supported for content integrity checking. (See
-the documentation for the *capabilities* command for more.)
-
-Clients examine the set of advertised redirect targets for compatibility.
-When sending a command request, the client advertises the set of redirect
-target names it is willing to follow, along with some other settings influencing
-behavior.
-
-For example, say the server is advertising a ``cdn`` redirect target that
-requires SNI and TLS 1.2. If the client supports those features, it will
-send command requests stating that the ``cdn`` target is acceptable to use.
-But if the client doesn't support SNI or TLS 1.2 (or maybe it encountered an
-error using this target from a previous request), then it omits this target
-name.
-
-If the client advertises support for a redirect target, the server MAY
-substitute the normal, inline response data for a *redirect response* -
-one where the initial CBOR map has a ``status`` key with value ``redirect``.
-
-The *redirect response* at a minimum advertises the URL where the response
-can be retrieved.
-
-The *redirect response* MAY also advertise additional details about that
-content and how to retrieve it. Notably, the response may contain the
-x509 public certificates for the server being redirected to or the
-certificate authority that signed that server's certificate. Unless the
-client has existing settings that offer stronger trust validation than what
-the server advertises, the client SHOULD use the server-provided certificates
-when validating the connection to the remote server in place of any default
-connection verification checks. This is because certificates coming from
-the server SHOULD establish a stronger chain of trust than what the default
-certificate validation mechanism in most environments provides. (By default,
-certificate validation ensures the signer of the cert chains up to a set of
-trusted root certificates. And if an explicit certificate or CA certificate
-is presented, that greatly reduces the set of certificates that will be
-recognized as valid, thus reducing the potential for a "bad" certificate
-to be used and trusted.)
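-
-A client-side sketch of honoring ``servercadercerts`` with Python's
-``ssl`` module (where the DER certificates come from, and the rest of
-the connection setup, are assumed)::
-
-  import ssl
-
-  # Trust only the CA certificates advertised in the redirect response.
-  ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
-  for der in servercadercerts:
-      # cadata accepts DER-encoded certificates as bytes.
-      ctx.load_verify_locations(cadata=der)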
--- a/mercurial/help/internals/wireprotocolv2.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,724 +0,0 @@
-**Experimental and under active development**
-
-This section documents the wire protocol commands exposed to transports
-using the frame-based protocol. The set of commands exposed through
-these transports is distinct from the set of commands exposed to legacy
-transports.
-
-The frame-based protocol uses CBOR to encode command execution requests.
-All command arguments must be mapped to a specific or set of CBOR data
-types.
-
-The response to many commands is also CBOR. There is no common response
-format: each command defines its own response format.
-
-TODOs
-=====
-
-* Add "node namespace" support to each command. In order to support
-  SHA-1 hash transition, we want servers to be able to expose different
-  "node namespaces" for the same data. Every command operating on nodes
-  should specify which "node namespace" it is operating on and responses
-  should encode the "node namespace" accordingly.
-
-Commands
-========
-
-The sections below detail all commands available to wire protocol version
-2.
-
-branchmap
----------
-
-Obtain heads in named branches.
-
-Receives no arguments.
-
-The response is a map with bytestring keys defining the branch name.
-Values are arrays of bytestring defining raw changeset nodes.
-
-capabilities
-------------
-
-Obtain the server's capabilities.
-
-Receives no arguments.
-
-This command is typically called only as part of the handshake during
-initial connection establishment.
-
-The response is a map with bytestring keys defining server information.
-
-The defined keys are:
-
-commands
-   A map defining available wire protocol commands on this server.
-
-   Keys in the map are the names of commands that can be invoked. Values
-   are maps defining information about that command. The bytestring keys
-   are:
-
-      args
-         (map) Describes arguments accepted by the command.
-
-         Keys are bytestrings denoting the argument name.
-
-         Values are maps describing the argument. The map has the following
-         bytestring keys:
-
-         default
-            (varied) The default value for this argument if not specified. Only
-            present if ``required`` is not true.
-
-         required
-            (boolean) Whether the argument must be specified. Failure to send
-            required arguments will result in an error executing the command.
-
-         type
-            (bytestring) The type of the argument. e.g. ``bytes`` or ``bool``.
-
-         validvalues
-            (set) Values that are recognized for this argument. Some arguments
-            only allow a fixed set of values to be specified. These arguments
-            may advertise that set in this key. If this set is advertised and
-            a value not in this set is specified, the command should result
-            in an error.
-
-      permissions
-         An array of permissions required to execute this command.
-
-      *
-         (various) Individual commands may define extra keys that supplement
-         generic command metadata. See the command definition for more.
-
-framingmediatypes
-   An array of bytestrings defining the supported framing protocol
-   media types. Servers will not accept media types not in this list.
-
-pathfilterprefixes
-   (set of bytestring) Matcher prefixes that are recognized when performing
-   path filtering. Specifying a path filter whose type/prefix does not
-   match one in this set will likely be rejected by the server.
-
-rawrepoformats
-   An array of storage formats the repository is using. This set of
-   requirements can be used to determine whether a client can read a
-   *raw* copy of file data available.
-
-redirect
-   A map declaring potential *content redirects* that may be used by this
-   server. Contains the following bytestring keys:
-
-   targets
-      (array of maps) Potential redirect targets. Values are maps describing
-      this target in more detail. Each map has the following bytestring keys:
-
-      name
-         (bytestring) Identifier for this target. The identifier will be used
-         by clients to uniquely identify this target.
-
-      protocol
-         (bytestring) High-level network protocol. Values can be
-         ``http``, ``https``, ``ssh``, etc.
-
-      uris
-          (array of bytestrings) Representative URIs for this target.
-
-      snirequired (optional)
-          (boolean) Indicates whether Server Name Indication is required
-          to use this target. Defaults to False.
-
-      tlsversions (optional)
-          (array of bytestring) Indicates which TLS versions are supported by
-          this target. Values are ``1.1``, ``1.2``, ``1.3``, etc.
-
-   hashes
-      (array of bytestring) Indicates support for hashing algorithms that are
-      used to ensure content integrity. Values include ``sha1``, ``sha256``,
-      etc.
-
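-A client-side sketch of filtering the advertised targets for
-compatibility (the set of locally supported TLS versions here is an
-assumption)::
-
-  import ssl
-
-  SUPPORTED_TLS = {b'1.2', b'1.3'}  # assumed local capability
-
-  def usabletargets(redirect):
-      for target in redirect[b'targets']:
-          if target.get(b'snirequired') and not ssl.HAS_SNI:
-              continue
-          tlsversions = target.get(b'tlsversions')
-          if tlsversions and not SUPPORTED_TLS.intersection(tlsversions):
-              continue
-          yield target[b'name']
-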
-changesetdata
--------------
-
-Obtain various data related to changesets.
-
-The command accepts the following arguments:
-
-revisions
-   (array of maps) Specifies revisions whose data is being requested. Each
-   value in the array is a map describing revisions. See the
-   *Revisions Specifiers* section below for the format of this map.
-
-   Data will be sent for the union of all revisions resolved by all
-   revision specifiers.
-
-   Only revision specifiers operating on changeset revisions are allowed.
-
-fields
-   (set of bytestring) Which data associated with changelog revisions to
-   fetch. The following values are recognized:
-
-   bookmarks
-      Bookmarks associated with a revision.
-
-   parents
-      Parent revisions.
-
-   phase
-      The phase state of a revision.
-
-   revision
-      The raw, revision data for the changelog entry. The hash of this data
-      will match the revision's node value.
-
-The response bytestream starts with a CBOR map describing the data that follows.
-This map has the following bytestring keys:
-
-totalitems
-   (unsigned integer) Total number of changelog revisions whose data is being
-   transferred. This maps to the set of revisions in the requested node
-   range, not the total number of records that follow (see below for why).
-
-Following the map header is a series of 0 or more CBOR values. If values
-are present, the first value will always be a map describing a single changeset
-revision.
-
-If the ``fieldsfollowing`` key is present, the map will immediately be followed
-by N CBOR bytestring values, where N is the number of elements in
-``fieldsfollowing``. Each bytestring value corresponds to a field denoted
-by ``fieldsfollowing``.
-
-Following the optional bytestring field values is the next revision descriptor
-map, or end of stream.
-
-Each revision descriptor map has the following bytestring keys:
-
-node
-   (bytestring) The node value for this revision. This is the SHA-1 hash of
-   the raw revision data.
-
-bookmarks (optional)
-   (array of bytestrings) Bookmarks attached to this revision. Only present
-   if ``bookmarks`` data is being requested and the revision has bookmarks
-   attached.
-
-fieldsfollowing (optional)
-   (array of 2-array) Denotes what fields immediately follow this map. Each
-   value is an array with 2 elements: the bytestring field name and an unsigned
-   integer describing the length of the data, in bytes.
-
-   If this key isn't present, no special fields will follow this map.
-
-   The following fields may be present:
-
-   revision
-      Raw, revision data for the changelog entry. Contains a serialized form
-      of the changeset data, including the author, date, commit message, set
-      of changed files, manifest node, and other metadata.
-
-      Only present if the ``revision`` field was requested.
-
-parents (optional)
-   (array of bytestrings) The nodes representing the parent revisions of this
-   revision. Only present if ``parents`` data is being requested.
-
-phase (optional)
-   (bytestring) The phase that a revision is in. Recognized values are
-   ``secret``, ``draft``, and ``public``. Only present if ``phase`` data
-   is being requested.
-
-The set of changeset revisions emitted may not match the exact set of
-changesets requested. Furthermore, the set of keys present on each
-map may vary. This is to facilitate emitting changeset updates as well
-as new revisions.
-
-For example, if the request wants ``phase`` and ``revision`` data,
-the response may contain entries for each changeset in the common nodes
-set with the ``phase`` key and without the ``revision`` key in order
-to reflect a phase-only update.
-
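-A sketch of consuming this bytestream using the third-party ``cbor2``
-library (Mercurial has its own CBOR code; names and error handling here
-are illustrative only)::
-
-  import cbor2
-
-  def readrevisions(fp):
-      decoder = cbor2.CBORDecoder(fp)
-      header = decoder.decode()  # map with b'totalitems'
-      while True:
-          try:
-              rev = decoder.decode()  # revision descriptor map
-          except cbor2.CBORDecodeError:
-              return  # end of stream
-          fields = {}
-          for name, size in rev.get(b'fieldsfollowing', []):
-              # Each field follows as a CBOR bytestring value.
-              fields[name] = decoder.decode()
-          yield rev, fields
-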
-TODO support different revision selection mechanisms (e.g. non-public, specific
-revisions)
-TODO support different hash "namespaces" for revisions (e.g. sha-1 versus other)
-TODO support emitting obsolescence data
-TODO support filtering based on relevant paths (narrow clone)
-TODO support hgtagsfnodes cache / tags data
-TODO support branch heads cache
-TODO consider unifying the query mechanism, e.g. as an array of "query descriptors"
-rather than a set of top-level arguments that have semantics when combined.
-
-filedata
---------
-
-Obtain various data related to an individual tracked file.
-
-The command accepts the following arguments:
-
-fields
-   (set of bytestring) Which data associated with a file to fetch.
-   The following values are recognized:
-
-   linknode
-      The changeset node introducing this revision.
-
-   parents
-      Parent nodes for the revision.
-
-   revision
-      The raw revision data for a file.
-
-haveparents
-   (bool) Whether the client has the parent revisions of all requested
-   nodes. If set, the server may emit revision data as deltas against
-   any parent revision. If not set, the server MUST only emit deltas for
-   revisions previously emitted by this command.
-
-   False is assumed in the absence of any value.
-
-nodes
-   (array of bytestrings) File nodes whose data to retrieve.
-
-path
-   (bytestring) Path of the tracked file whose data to retrieve.
-
-TODO allow specifying revisions via alternate means (such as from
-changeset revisions or ranges)
-
-The response bytestream starts with a CBOR map describing the data that
-follows. It has the following bytestring keys:
-
-totalitems
-   (unsigned integer) Total number of file revisions whose data is
-   being returned.
-
-Following the map header is a series of 0 or more CBOR values. If values
-are present, the first value will always be a map describing a single file
-revision.
-
-If the ``fieldsfollowing`` key is present, the map will immediately be followed
-by N CBOR bytestring values, where N is the number of elements in
-``fieldsfollowing``. Each bytestring value corresponds to a field denoted
-by ``fieldsfollowing``.
-
-Following the optional bytestring field values is the next revision descriptor
-map, or end of stream.
-
-Each revision descriptor map has the following bytestring keys:
-
-node
-   (bytestring) The node of the file revision whose data is represented.
-
-deltabasenode
-   (bytestring) Node of the file revision the following delta is against.
-
-   Only present if the ``revision`` field is requested and delta data
-   follows this map.
-
-fieldsfollowing
-   (array of 2-array) Denotes extra bytestring fields that follow this map.
-   See the documentation for ``changesetdata`` for semantics.
-
-   The following named fields may be present:
-
-   ``delta``
-      The delta data to use to construct the fulltext revision.
-
-      Only present if the ``revision`` field is requested and a delta is
-      being emitted. The ``deltabasenode`` top-level key will also be
-      present if this field is being emitted.
-
-   ``revision``
-      The fulltext revision data for this file. Only present if the
-      ``revision`` field is requested and a fulltext revision is being emitted.
-
-parents
-   (array of bytestring) The nodes of the parents of this file revision.
-
-   Only present if the ``parents`` field is requested.
-
-When ``revision`` data is requested, the server chooses to emit either fulltext
-revision data or a delta. What the server decides can be inferred by looking
-for the presence of the ``delta`` or ``revision`` keys in the
-``fieldsfollowing`` array.
-
-filesdata
----------
-
-Obtain various data related to multiple tracked files for specific changesets.
-
-This command is similar to ``filedata`` with the main difference being that
-individual requests operate on multiple file paths. This allows clients to
-request data for multiple paths by issuing a single command.
-
-The command accepts the following arguments:
-
-fields
-   (set of bytestring) Which data associated with a file to fetch.
-   The following values are recognized:
-
-   linknode
-      The changeset node introducing this revision.
-
-   parents
-      Parent nodes for the revision.
-
-   revision
-      The raw revision data for a file.
-
-haveparents
-   (bool) Whether the client has the parent revisions of all requested
-   nodes.
-
-pathfilter
-   (map) Defines a filter that determines what file paths are relevant.
-
-   See the *Path Filters* section for more.
-
-   If the argument is omitted, it is assumed that all paths are relevant.
-
-revisions
-   (array of maps) Specifies revisions whose data is being requested. Each value
-   in the array is a map describing revisions. See the *Revisions Specifiers*
-   section below for the format of this map.
-
-   Data will be sent for the union of all revisions resolved by all revision
-   specifiers.
-
-   Only revision specifiers operating on changeset revisions are allowed.
-
-The response bytestream starts with a CBOR map describing the data that
-follows. This map has the following bytestring keys:
-
-totalpaths
-   (unsigned integer) Total number of paths whose data is being transferred.
-
-totalitems
-   (unsigned integer) Total number of file revisions whose data is being
-   transferred.
-
-Following the map header are 0 or more sequences of CBOR values. Each sequence
-represents data for a specific tracked path. Each sequence begins with a CBOR
-map describing the file data that follows. Following that map are N CBOR values
-describing file revision data. The format of this data is identical to that
-returned by the ``filedata`` command.
-
-Each sequence's map header has the following bytestring keys:
-
-path
-   (bytestring) The tracked file path whose data follows.
-
-totalitems
-   (unsigned integer) Total number of file revisions whose data is being
-   transferred.
-
-The ``haveparents`` argument has significant implications for the data
-transferred.
-
-When ``haveparents`` is true, the command MAY only emit data for file
-revisions introduced by the set of changeset revisions whose data is being
-requested. In other words, the command may assume that all file revisions
-for all relevant paths for ancestors of the requested changeset revisions
-are present on the receiver.
-
-When ``haveparents`` is false, the command MUST assume that the receiver
-has no file revisions data. This means that all referenced file revisions
-in the queried set of changeset revisions will be sent.
-
-TODO we want a more complicated mechanism for the client to specify which
-ancestor revisions are known. This is needed so intelligent deltas can be
-emitted and so updated linknodes can be sent if the client needs to adjust
-its linknodes for existing file nodes to older changeset revisions.
-TODO we may want to make linknodes an array so multiple changesets can be
-marked as introducing a file revision, since this can occur with e.g. hidden
-changesets.
-
-heads
------
-
-Obtain DAG heads in the repository.
-
-The command accepts the following arguments:
-
-publiconly (optional)
-   (boolean) If set, operate on the DAG for public phase changesets only.
-   Non-public (i.e. draft) phase DAG heads will not be returned.
-
-The response is a CBOR array of bytestrings defining changeset nodes
-of DAG heads. The array can be empty if the repository is empty or no
-changesets satisfied the request.
-
-TODO consider exposing phase of heads in response
-
-known
------
-
-Determine whether a series of changeset nodes is known to the server.
-
-The command accepts the following arguments:
-
-nodes
-   (array of bytestrings) List of changeset nodes whose presence to
-   query.
-
-The response is a bytestring where each byte contains a 0 or 1 for the
-corresponding requested node at the same index.
-
-TODO use a bit array for even more compact response
-
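-For illustration, assuming each byte is an ASCII ``0`` or ``1`` (as
-with the legacy ``known`` command; this encoding is an assumption), a
-client could decode the response as follows::
-
-  def parseknown(nodes, payload):
-      # Byte i answers the membership query for nodes[i].
-      return {node: byte == ord('1')
-              for node, byte in zip(nodes, payload)}
-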
-listkeys
---------
-
-List values in a specified ``pushkey`` namespace.
-
-The command receives the following arguments:
-
-namespace
-   (bytestring) Pushkey namespace to query.
-
-The response is a map with bytestring keys and values.
-
-TODO consider using binary to represent nodes in certain pushkey namespaces.
-
-lookup
-------
-
-Try to resolve a value to a changeset revision.
-
-Unlike ``known`` which operates on changeset nodes, lookup operates on
-node fragments and other names that a user may use.
-
-The command receives the following arguments:
-
-key
-   (bytestring) Value to try to resolve.
-
-On success, returns a bytestring containing the resolved node.
-
-manifestdata
-------------
-
-Obtain various data related to manifests (which are lists of files in
-a revision).
-
-The command accepts the following arguments:
-
-fields
-   (set of bytestring) Which data associated with manifests to fetch.
-   The following values are recognized:
-
-   parents
-      Parent nodes for the manifest.
-
-   revision
-      The raw revision data for the manifest.
-
-haveparents
-   (bool) Whether the client has the parent revisions of all requested
-   nodes. If set, the server may emit revision data as deltas against
-   any parent revision. If not set, the server MUST only emit deltas for
-   revisions previously emitted by this command.
-
-   False is assumed in the absence of any value.
-
-nodes
-   (array of bytestring) Manifest nodes whose data to retrieve.
-
-tree
-   (bytestring) Path to manifest to retrieve. The empty bytestring represents
-   the root manifest. All other values represent directories/trees within
-   the repository.
-
-TODO allow specifying revisions via alternate means (such as from changeset
-revisions or ranges)
-TODO consider recursive expansion of manifests (with path filtering for
-narrow use cases)
-
-The response bytestream starts with a CBOR map describing the data that
-follows. It has the following bytestring keys:
-
-totalitems
-   (unsigned integer) Total number of manifest revisions whose data is
-   being returned.
-
-Following the map header is a series of 0 or more CBOR values. If values
-are present, the first value will always be a map describing a single manifest
-revision.
-
-If the ``fieldsfollowing`` key is present, the map will immediately be followed
-by N CBOR bytestring values, where N is the number of elements in
-``fieldsfollowing``. Each bytestring value corresponds to a field denoted
-by ``fieldsfollowing``.
-
-Following the optional bytestring field values is the next revision descriptor
-map, or end of stream.
-
-Each revision descriptor map has the following bytestring keys:
-
-node
-   (bytestring) The node of the manifest revision whose data is represented.
-
-deltabasenode
-   (bytestring) The node that the delta representation of this revision is
-   computed against. Only present if the ``revision`` field is requested and
-   a delta is being emitted.
-
-fieldsfollowing
-   (array of 2-array) Denotes extra bytestring fields that follow this map.
-   See the documentation for ``changesetdata`` for semantics.
-
-   The following named fields may be present:
-
-   ``delta``
-      The delta data to use to construct the fulltext revision.
-
-      Only present if the ``revision`` field is requested and a delta is
-      being emitted. The ``deltabasenode`` top-level key will also be
-      present if this field is being emitted.
-
-   ``revision``
-      The fulltext revision data for this manifest. Only present if the
-      ``revision`` field is requested and a fulltext revision is being emitted.
-
-parents
-   (array of bytestring) The nodes of the parents of this manifest revision.
-   Only present if the ``parents`` field is requested.
-
-When ``revision`` data is requested, the server chooses to emit either fulltext
-revision data or a delta. What the server decides can be inferred by looking
-for the presence of ``delta`` or ``revision`` in the ``fieldsfollowing`` array.
-
-Servers MAY advertise the following extra fields in the capabilities
-descriptor for this command:
-
-recommendedbatchsize
-   (unsigned integer) Number of revisions the server recommends as a batch
-   query size. If defined, clients needing to issue multiple ``manifestdata``
-   commands to obtain needed data SHOULD construct their commands to have
-   this many revisions per request.
-
-pushkey
--------
-
-Set a value using the ``pushkey`` protocol.
-
-The command receives the following arguments:
-
-namespace
-   (bytestring) Pushkey namespace to operate on.
-key
-   (bytestring) The pushkey key to set.
-old
-   (bytestring) Old value for this key.
-new
-   (bytestring) New value for this key.
-
-TODO consider using binary to represent nodes in certain pushkey namespaces.
-TODO better define response type and meaning.
-
-rawstorefiledata
-----------------
-
-Allows retrieving raw files used to store repository data.
-
-The command accepts the following arguments:
-
-files
-   (array of bytestring) Describes the files that should be retrieved.
-
-   The meaning of values in this array is dependent on the storage backend used
-   by the server.
-
-The response bytestream starts with a CBOR map describing the data that follows.
-This map has the following bytestring keys:
-
-filecount
-   (unsigned integer) Total number of files whose data is being transferred.
-
-totalsize
-   (unsigned integer) Total size in bytes of file data that will be
-   transferred. This is the on-disk size of the files, not the wire size.
-
-Following the map header are N file segments. Each file segment consists of a
-CBOR map followed by an indefinite length bytestring. Each map has the following
-bytestring keys:
-
-location
-   (bytestring) Denotes the location in the repository where the file should be
-   written. Values map to vfs instances to use for writing.
-
-path
-   (bytestring) Path of file being transferred. Path is the raw store
-   path and can be any sequence of bytes that can be tracked in a Mercurial
-   manifest.
-
-size
-   (unsigned integer) Size of file data. This will be the final written
-   file size. The total size of the data that follows the CBOR map
-   will be greater due to the encoding overhead of CBOR.
-
-TODO this command is woefully incomplete. If we are to move forward with a
-stream clone analog, it needs a lot more metadata around how to describe what
-files are available to retrieve, and other semantics.
-
-Revision Specifiers
-===================
-
-A *revision specifier* is a map that evaluates to a set of revisions.
-
-A *revision specifier* has a ``type`` key that defines the revision
-selection type to perform. Other keys in the map are used in a
-type-specific manner.
-
-The following types are defined:
-
-changesetexplicit
-   An explicit set of enumerated changeset revisions.
-
-   The ``nodes`` key MUST contain an array of full binary nodes, expressed
-   as bytestrings.
-
-changesetexplicitdepth
-   Like ``changesetexplicit``, but contains a ``depth`` key defining the
-   unsigned integer number of ancestor revisions to also resolve. For each
-   value in ``nodes``, DAG ancestors will be walked until up to N total
-   revisions from that ancestry walk are present in the final resolved set.
-
-changesetdagrange
-   Defines revisions via a DAG range of changesets on the changelog.
-
-   The ``roots`` key MUST contain an array of full, binary node values
-   representing the *root* revisions.
-
-   The ``heads`` key MUST contain an array of full, binary nodes values
-   representing the *head* revisions.
-
-   The DAG range between ``roots`` and ``heads`` will be resolved and all
-   revisions between will be used. Nodes in ``roots`` are not part of the
-   resolved set. Nodes in ``heads`` are. The ``roots`` array may be empty.
-   The ``heads`` array MUST be defined.
-
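-For illustration, a ``changesetdagrange`` specifier selecting all
-ancestors of two heads, expressed as a Python literal (the node values
-are hypothetical 20-byte binary strings, elided here)::
-
-  {
-      b'type': b'changesetdagrange',
-      b'roots': [],              # empty: no lower bound on the range
-      b'heads': [head1, head2],  # full binary nodes; MUST be defined
-  }
-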
-Path Filters
-============
-
-Various commands accept a *path filter* argument that defines the set of file
-paths relevant to the request.
-
-A *path filter* is defined as a map with the bytestring keys ``include`` and
-``exclude``. Each is an array of bytestring values. Each value defines a pattern
-rule (see :hg:`help patterns`) that is used to match file paths.
-
-A path matches the path filter if it is matched by a rule in the ``include``
-set but doesn't match a rule in the ``exclude`` set. In other words, a path
-matcher takes the union of all ``include`` patterns and then subtracts the
-union of all ``exclude`` patterns.
-
-Patterns MUST be prefixed with their pattern type. Only the following pattern
-types are allowed: ``path:``, ``rootfilesin:``.
-
-If the ``include`` key is omitted, it is assumed that all paths are
-relevant. The patterns from ``exclude`` will still be used, if defined.
-
-An example value is ``path:tests/foo``, which would match a file named
-``tests/foo`` or a directory ``tests/foo`` and all files under it.
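-
-A sketch of the resulting matching logic for the two allowed pattern
-types (function names are hypothetical; Mercurial's real matcher is far
-more general)::
-
-  def matchespattern(pattern, path):
-      kind, rest = pattern.split(b':', 1)
-      if kind == b'path':
-          return path == rest or path.startswith(rest + b'/')
-      if kind == b'rootfilesin':
-          # Files directly in the directory, not in subdirectories.
-          return b'/' in path and path.rsplit(b'/', 1)[0] == rest
-      raise ValueError('unsupported pattern type')
-
-  def matchesfilter(pathfilter, path):
-      include = pathfilter.get(b'include')
-      exclude = pathfilter.get(b'exclude', [])
-      if include is not None and not any(
-              matchespattern(p, path) for p in include):
-          return False
-      return not any(matchespattern(p, path) for p in exclude)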
--- a/mercurial/help/merge-tools.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,109 +0,0 @@
-To merge files Mercurial uses merge tools.
-
-A merge tool combines two different versions of a file into a merged
-file. Merge tools are given the two files and the greatest common
-ancestor of the two file versions, so they can determine the changes
-made on both branches.
-
-Merge tools are used both for :hg:`resolve`, :hg:`merge`, :hg:`update`,
-:hg:`backout` and in several extensions.
-
-Usually, the merge tool tries to automatically reconcile the files by
-combining all non-overlapping changes that occurred separately in
-the two different evolutions of the same initial base file. Furthermore, some
-interactive merge programs make it easier to manually resolve
-conflicting merges, either in a graphical way, or by inserting some
-conflict markers. Mercurial does not include any interactive merge
-programs but relies on external tools for that.
-
-Available merge tools
-=====================
-
-External merge tools and their properties are configured in the
-merge-tools configuration section - see hgrc(5) - but they can often just
-be named by their executable.
-
-A merge tool is generally usable if its executable can be found on the
-system and if it can handle the merge. The executable is found if it
-is an absolute or relative executable path or the name of an
-application in the executable search path. The tool is assumed to be
-able to handle the merge if it can handle symlinks when the file is a
-symlink, if it can handle binary files when the file is binary, and if
-a GUI is available when the tool requires a GUI.
-
-There are some internal merge tools which can be used. The internal
-merge tools are:
-
-.. internaltoolsmarker
-
-Internal tools are always available and do not require a GUI but will
-by default not handle symlinks or binary files. See the next section
-for details about the "actual capabilities" described above.
-
-Choosing a merge tool
-=====================
-
-Mercurial uses these rules when deciding which merge tool to use:
-
-1. If a tool has been specified with the --tool option to merge or resolve, it
-   is used.  If it is the name of a tool in the merge-tools configuration, its
-   configuration is used. Otherwise the specified tool must be executable by
-   the shell.
-
-2. If the ``HGMERGE`` environment variable is present, its value is used and
-   must be executable by the shell.
-
-3. If the filename of the file to be merged matches any of the patterns in the
-   merge-patterns configuration section, the first usable merge tool
-   corresponding to a matching pattern is used.
-
-4. If ui.merge is set it will be considered next. If the value is not the name
-   of a configured tool, the specified value is used and must be executable by
-   the shell. Otherwise the named tool is used if it is usable.
-
-5. If any usable merge tools are present in the merge-tools configuration
-   section, the one with the highest priority is used.
-
-6. If a program named ``hgmerge`` can be found on the system, it is used - but
-   it will by default not be used for symlinks and binary files.
-
-7. If the file to be merged is not binary and is not a symlink, then
-   internal ``:merge`` is used.
-
-8. Otherwise, ``:prompt`` is used.
-
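-For example, a hypothetical entry in the merge-tools section giving
-``kdiff3`` a high priority and explicit arguments might look like
-(see hgrc(5) for the full syntax)::
-
-  [merge-tools]
-  kdiff3.priority = 100
-  kdiff3.args = $base $local $other -o $output
-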
-For historical reasons, Mercurial treats merge tools as shown below
-while examining the rules above.
-
-==== =============== ====== =======
-step specified via   binary symlink
-==== =============== ====== =======
-1.   --tool          o/o    o/o
-2.   HGMERGE         o/o    o/o
-3.   merge-patterns  o/o(*) x/?(*)
-4.   ui.merge        x/?(*) x/?(*)
-==== =============== ====== =======
-
-Each capability column indicates Mercurial behavior for
-internal/external merge tools when examining each rule.
-
-- "o": "assume that a tool has capability"
-- "x": "assume that a tool does not have capability"
-- "?": "check actual capability of a tool"
-
-If the ``merge.strict-capability-check`` configuration option is true,
-Mercurial checks the capabilities of merge tools strictly in the (*)
-cases above (i.e. each capability column becomes "?/?"). It is false by
-default for backward compatibility.
-
-.. note::
-
-   After selecting a merge program, Mercurial will by default attempt
-   to merge the files using a simple merge algorithm first. Only if it doesn't
-   succeed because of conflicting changes will Mercurial actually execute the
-   merge program. Whether to use the simple merge algorithm first can be
-   controlled by the premerge setting of the merge tool. Premerge is enabled by
-   default unless the file is binary or a symlink.
-
-See the merge-tools and ui sections of hgrc(5) for details on the
-configuration of merge tools.
--- a/mercurial/help/pager.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-Some Mercurial commands can produce a lot of output, and Mercurial will
-attempt to use a pager to make those commands more pleasant.
-
-To set the pager that should be used, set the ``pager.pager``
-configuration option::
-
-  [pager]
-  pager = less -FRX
-
-If no pager is set in the user or repository configuration, Mercurial uses the
-environment variable $PAGER. If $PAGER is not set, pager.pager from the default
-or system configuration is used. If none of these are set, a default pager will
-be used, typically `less` on Unix and `more` on Windows.
-
-.. container:: windows
-
-  On Windows, `more` is not color aware, so using it effectively disables color.
-  MSYS and Cygwin shells provide `less` as a pager, which can be configured to
-  support ANSI color codes.  See :hg:`help config.color.pagermode` to configure
-  the color mode when invoking a pager.
-
-You can disable the pager for certain commands by adding them to the
-pager.ignore list::
-
-  [pager]
-  ignore = version, help, update
-
-To ignore global commands like :hg:`version` or :hg:`help`, you have
-to specify them in your user configuration file.
-
-To control whether the pager is used at all for an individual command,
-you can use --pager=<value>:
-
-  - use as needed: `auto`.
-  - require the pager: `yes` or `on`.
-  - suppress the pager: `no` or `off` (any unrecognized value
-    will also work).
-
-To globally turn off all attempts to use a pager, set::
-
-  [ui]
-  paginate = never
-
-which will prevent the pager from running.
--- a/mercurial/help/patterns.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-Mercurial accepts several notations for identifying one or more files
-at a time.
-
-By default, Mercurial treats filenames as shell-style extended glob
-patterns.
-
-Alternate pattern notations must be specified explicitly.
-
-.. note::
-
-  Patterns specified in ``.hgignore`` are not rooted.
-  Please see :hg:`help hgignore` for details.
-
-To use a plain path name without any pattern matching, start it with
-``path:``. These path names must completely match starting at the
-current repository root, and when the path points to a directory, it is matched
-recursively. To match all files in a directory non-recursively (not including
-any files in subdirectories), ``rootfilesin:`` can be used, specifying an
-absolute path (relative to the repository root).
-
-To use an extended glob, start a name with ``glob:``. Globs are rooted
-at the current directory; a glob such as ``*.c`` will only match files
-in the current directory ending with ``.c``. ``rootglob:`` can be used
-instead of ``glob:`` for a glob that is rooted at the root of the
-repository.
-
-The supported glob syntax extensions are ``**`` to match any string
-across path separators and ``{a,b}`` to mean "a or b".
-
-To use a Perl/Python regular expression, start a name with ``re:``.
-Regexp pattern matching is anchored at the root of the repository.
-
-To read name patterns from a file, use ``listfile:`` or ``listfile0:``.
-The latter expects null delimited patterns while the former expects line
-feeds. Each string read from the file is itself treated as a file
-pattern.
-
-To read a set of patterns from a file, use ``include:`` or ``subinclude:``.
-``include:`` will use all the patterns from the given file and treat them as if
-they had been passed in manually.  ``subinclude:`` will only apply the patterns
-against files that are under the subinclude file's directory. See :hg:`help
-hgignore` for details on the format of these files.
-
-All patterns can also match directories (files under matched directories
-are treated as matched), except for ``glob:`` patterns specified on the
-command line; this exception does not apply to the ``-I`` and ``-X``
-options. For the ``-I`` and ``-X`` options, ``glob:`` matches directories
-recursively.
-
-Plain examples::
-
-  path:foo/bar        a name bar in a directory named foo in the root
-                      of the repository
-  path:path:name      a file or directory named "path:name"
-  rootfilesin:foo/bar the files in a directory called foo/bar, but not any files
-                      in its subdirectories and not a file bar in directory foo
-
-Glob examples::
-
-  glob:*.c       any name ending in ".c" in the current directory
-  *.c            any name ending in ".c" in the current directory
-  **.c           any name ending in ".c" in any subdirectory of the
-                 current directory including itself.
-  foo/*          any file in directory foo
-  foo/**         any file in directory foo plus all its subdirectories,
-                 recursively
-  foo/*.c        any name ending in ".c" in the directory foo
-  foo/**.c       any name ending in ".c" in any subdirectory of foo
-                 including itself.
-  rootglob:*.c   any name ending in ".c" in the root of the repository
-
-Regexp examples::
-
-  re:.*\.c$      any name ending in ".c", anywhere in the repository
-
-File examples::
-
-  listfile:list.txt  read list from list.txt with one file pattern per line
-  listfile0:list.txt read list from list.txt with null byte delimiters
-
-See also :hg:`help filesets`.
-
-Include examples::
-
-  include:path/to/mypatternfile    reads patterns to be applied to all paths
-  subinclude:path/to/subignorefile reads patterns specifically for paths in the
-                                   subdirectory
--- a/mercurial/help/phases.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-What are phases?
-================
-
-Phases are a system for tracking which changesets have been or should
-be shared. This helps prevent common mistakes when modifying history
-(for instance, with the mq or rebase extensions).
-
-Each changeset in a repository is in one of the following phases:
-
- - public : changeset is visible on a public server
- - draft : changeset is not yet published
- - secret : changeset should not be pushed, pulled, or cloned
-
-These phases are ordered (public < draft < secret) and no changeset
-can be in a lower phase than its ancestors. For instance, if a
-changeset is public, all its ancestors are also public. Lastly,
-changeset phases should only be changed towards the public phase.
-
-How are phases managed?
-=======================
-
-For the most part, phases should work transparently. By default, a
-changeset is created in the draft phase and is moved into the public
-phase when it is pushed to another repository.
-
-Once changesets become public, extensions like mq and rebase will
-refuse to operate on them to prevent creating duplicate changesets.
-Phases can also be manually manipulated with the :hg:`phase` command
-if needed. See :hg:`help -v phase` for examples.
-
-To make your commits secret by default, put this in your
-configuration file::
-
-  [phases]
-  new-commit = secret
-
-Phases and servers
-==================
-
-By default, all servers are ``publishing``. This means::
-
- - all draft changesets that are pulled or cloned appear in phase
- public on the client
-
- - all draft changesets that are pushed appear as public on both
- client and server
-
- - secret changesets are neither pushed, pulled, nor cloned
-
-.. note::
-
-  Pulling a draft changeset from a publishing server does not mark it
-  as public on the server side due to the read-only nature of pull.
-
-Sometimes it may be desirable to push and pull changesets in the draft
-phase to share unfinished work. This can be done by setting a
-repository to disable publishing in its configuration file::
-
-  [phases]
-  publish = False
-
-See :hg:`help config` for more information on configuration files.
-
-.. note::
-
-  Servers running older versions of Mercurial are treated as
-  publishing.
-
-.. note::
-
-   Changesets in secret phase are not exchanged with the server. This
-   applies to their content: file names, file contents, and changeset
-   metadata. For technical reasons, the identifier (e.g. d825e4025e39)
-   of the secret changeset may be communicated to the server.
-
-
-Examples
-========
-
- - list changesets in draft or secret phase::
-
-     hg log -r "not public()"
-
- - change all secret changesets to draft::
-
-     hg phase --draft "secret()"
-
- - forcibly move the current changeset and descendants from public to draft::
-
-     hg phase --force --draft .
-
- - show a list of changeset revisions and each corresponding phase::
-
-     hg log --template "{rev} {phase}\n"
-
- - resynchronize draft changesets relative to a remote repository::
-
-     hg phase -fd "outgoing(URL)"
-
-See :hg:`help phase` for more information on manually manipulating phases.
--- a/mercurial/help/revisions.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,223 +0,0 @@
-Mercurial supports several ways to specify revisions.
-
-Specifying single revisions
-===========================
-
-A plain integer is treated as a revision number. Negative integers are
-treated as sequential offsets from the tip, with -1 denoting the tip,
--2 denoting the revision prior to the tip, and so forth.
-
-A 40-digit hexadecimal string is treated as a unique revision identifier.
-A hexadecimal string less than 40 characters long is treated as a
-unique revision identifier and is referred to as a short-form
-identifier. A short-form identifier is only valid if it is the prefix
-of exactly one full-length identifier.
-
-Any other string is treated as a bookmark, tag, or branch name. A
-bookmark is a movable pointer to a revision. A tag is a permanent name
-associated with a revision. A branch name denotes the tipmost open branch head
-of that branch - or if they are all closed, the tipmost closed head of the
-branch. Bookmark, tag, and branch names must not contain the ":" character.
-
-The reserved name "tip" always identifies the most recent revision.
-
-The reserved name "null" indicates the null revision. This is the
-revision of an empty repository, and the parent of revision 0.
-
-The reserved name "." indicates the working directory parent. If no
-working directory is checked out, it is equivalent to null. If an
-uncommitted merge is in progress, "." is the revision of the first
-parent.
-
-Finally, commands that expect a single revision (like ``hg update``) also
-accept revsets (see below for details). When given a revset, they use the
-last revision of the revset. A few commands accept two single revisions
-(like ``hg diff``). When given a revset, they use the first and the last
-revisions of the revset.
-
-Specifying multiple revisions
-=============================
-
-Mercurial supports a functional language for selecting a set of
-revisions. Expressions in this language are called revsets.
-
-The language supports a number of predicates which are joined by infix
-operators. Parentheses can be used for grouping.
-
-Identifiers such as branch names may need quoting with single or
-double quotes if they contain characters like ``-`` or if they match
-one of the predefined predicates.
-
-Special characters can be used in quoted identifiers by escaping them,
-e.g., ``\n`` is interpreted as a newline. To prevent them from being
-interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``.
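-
-For example, a tag or branch name containing ``-`` must be quoted
-inside a revset (the name here is illustrative)::
-
-  hg log -r "'release-1.0'"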
-
-Operators
-=========
-
-There is a single prefix operator:
-
-``not x``
-  Changesets not in x. Short form is ``! x``.
-
-These are the supported infix operators:
-
-``x::y``
-  A DAG range, meaning all changesets that are descendants of x and
-  ancestors of y, including x and y themselves. If the first endpoint
-  is left out, this is equivalent to ``ancestors(y)``; if the second
-  is left out it is equivalent to ``descendants(x)``.
-
-  An alternative syntax is ``x..y``.
-
-``x:y``
-  All changesets with revision numbers between x and y, both
-  inclusive. Either endpoint can be left out; they default to 0 and
-  tip.
-
-``x and y``
-  The intersection of changesets in x and y. Short form is ``x & y``.
-
-``x or y``
-  The union of changesets in x and y. There are two alternative short
-  forms: ``x | y`` and ``x + y``.
-
-``x - y``
-  Changesets in x but not in y.
-
-``x % y``
-  Changesets that are ancestors of x but not ancestors of y (i.e. ::x - ::y).
-  This is shorthand notation for ``only(x, y)`` (see below). The second
-  argument is optional and, if left out, is equivalent to ``only(x)``.
-
-``x^n``
-  The nth parent of x, n == 0, 1, or 2.
-  For n == 0, x; for n == 1, the first parent of each changeset in x;
-  for n == 2, the second parent of each changeset in x.
-
-``x~n``
-  The nth first ancestor of x; ``x~0`` is x; ``x~3`` is ``x^^^``.
-  For n < 0, the nth unambiguous descendant of x.
-
-``x ## y``
-  Concatenate strings and identifiers into one string.
-
-  All other prefix, infix and postfix operators have lower priority than
-  ``##``. For example, ``a1 ## a2~2`` is equivalent to ``(a1 ## a2)~2``.
-
-  For example::
-
-    [revsetalias]
-    issue(a1) = grep(r'\bissue[ :]?' ## a1 ## r'\b|\bbug\(' ## a1 ## r'\)')
-
-  ``issue(1234)`` is equivalent to
-  ``grep(r'\bissue[ :]?1234\b|\bbug\(1234\)')``
-  in this case. This matches against all of "issue 1234", "issue:1234",
-  "issue1234" and "bug(1234)".
-
-There is a single postfix operator:
-
-``x^``
-  Equivalent to ``x^1``, the first parent of each changeset in x.
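-
-  For example, ``hg log -r '.^'`` shows the first parent of the
-  working directory parent.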
-
-Patterns
-========
-
-Where noted, predicates that perform string matching can accept a pattern
-string. The pattern may be either a literal, or a regular expression. If the
-pattern starts with ``re:``, the remainder of the pattern is treated as a
-regular expression. Otherwise, it is treated as a literal. To match a pattern
-that actually starts with ``re:``, use the prefix ``literal:``.
-
-Matching is case-sensitive, unless otherwise noted.  To perform a case-
-insensitive match on a case-sensitive predicate, use a regular expression,
-prefixed with ``(?i)``.
-
-For example, ``tag(r're:(?i)release')`` matches "release" or "RELEASE"
-or "Release", etc.
-
-Predicates
-==========
-
-The following predicates are supported:
-
-.. predicatesmarker
-
-Aliases
-=======
-
-New predicates (known as "aliases") can be defined, using any combination of
-existing predicates or other aliases. An alias definition looks like::
-
-  <alias> = <definition>
-
-in the ``revsetalias`` section of a Mercurial configuration file. Arguments
-of the form `a1`, `a2`, etc. are substituted from the alias into the
-definition.
-
-For example,
-
-::
-
-  [revsetalias]
-  h = heads()
-  d(s) = sort(s, date)
-  rs(s, k) = reverse(sort(s, k))
-
-defines three aliases, ``h``, ``d``, and ``rs``. ``rs(0:tip, author)`` is
-exactly equivalent to ``reverse(sort(0:tip, author))``.
-
-Equivalents
-===========
-
-Command line equivalents for :hg:`log`::
-
-  -f    ->  ::.
-  -d x  ->  date(x)
-  -k x  ->  keyword(x)
-  -m    ->  merge()
-  -u x  ->  user(x)
-  -b x  ->  branch(x)
-  -P x  ->  !::x
-  -l x  ->  limit(expr, x)
-
-Examples
-========
-
-Some sample queries:
-
-- Changesets on the default branch::
-
-    hg log -r "branch(default)"
-
-- Changesets on the default branch since tag 1.5 (excluding merges)::
-
-    hg log -r "branch(default) and 1.5:: and not merge()"
-
-- Open branch heads::
-
-    hg log -r "head() and not closed()"
-
-- Changesets between tags 1.3 and 1.5 mentioning "bug" that affect
-  ``hgext/*``::
-
-    hg log -r "1.3::1.5 and keyword(bug) and file('hgext/*')"
-
-- Changesets committed in May 2008, sorted by user::
-
-    hg log -r "sort(date('May 2008'), user)"
-
-- Changesets mentioning "bug" or "issue" that are not in a tagged
-  release::
-
-    hg log -r "(keyword(bug) or keyword(issue)) and not ancestors(tag())"
-
-- Update to the commit that bookmark @ is pointing to, without activating the
-  bookmark (this works because the last revision of the revset is used)::
-
-    hg update :@
-
-- Show diff between tags 1.3 and 1.5 (this works because the first and the
-  last revisions of the revset are used)::
-
-    hg diff -r 1.3::1.5
--- a/mercurial/help/scripting.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,202 +0,0 @@
-It is common for machines (as opposed to humans) to consume Mercurial.
-This help topic describes some of the considerations for interfacing
-machines with Mercurial.
-
-Choosing an Interface
-=====================
-
-Machines have a choice of several methods to interface with Mercurial.
-These include:
-
-- Executing the ``hg`` process
-- Querying an HTTP server
-- Calling out to a command server
-
-Executing ``hg`` processes is very similar to how humans interact with
-Mercurial in the shell. It should already be familiar to you.
-
-:hg:`serve` can be used to start a server. By default, this will start
-a "hgweb" HTTP server. This HTTP server has support for machine-readable
-output, such as JSON. For more, see :hg:`help hgweb`.
-
-:hg:`serve` can also start a "command server." Clients can connect
-to this server and issue Mercurial commands over a special protocol.
-For more details on the command server, including links to client
-libraries, see https://www.mercurial-scm.org/wiki/CommandServer.
-
-:hg:`serve` based interfaces (the hgweb and command servers) have the
-advantage over simple ``hg`` process invocations in that they are
-likely more efficient. This is because there is significant overhead
-to spawning new Python processes.
-
-.. tip::
-
-   If you need to invoke several ``hg`` processes in short order and/or
-   performance is important to you, use of a server-based interface
-   is highly recommended.
-
-Environment Variables
-=====================
-
-As documented in :hg:`help environment`, various environment variables
-influence the operation of Mercurial. The following are particularly
-relevant for machines consuming Mercurial:
-
-HGPLAIN
-    If not set, Mercurial's output could be influenced by configuration
-    settings that impact its encoding, verbose mode, localization, etc.
-
-    It is highly recommended for machines to set this variable when
-    invoking ``hg`` processes.
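-
-    For example (a minimal sketch)::
-
-        $ HGPLAIN=1 hg log -T '{node}\n'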
-
-HGENCODING
-    If not set, the locale used by Mercurial will be detected from the
-    environment. If the determined locale does not support display of
-    certain characters, Mercurial may render these character sequences
-    incorrectly (often by using "?" as a placeholder for invalid
-    characters in the current locale).
-
-    Explicitly setting this environment variable is a good practice to
-    guarantee consistent results. "utf-8" is a good choice in UNIX-like
-    environments.
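-
-    For example::
-
-        $ HGENCODING=utf-8 hg log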
-
-HGRCPATH
-    If not set, Mercurial will inherit config options from config files
-    using the process described in :hg:`help config`. This includes
-    inheriting user or system-wide config files.
-
-    When utmost control over the Mercurial configuration is desired, the
-    value of ``HGRCPATH`` can be set to an explicit file with known good
-    configs. In rare cases, the value can be set to an empty file or the
-    null device (often ``/dev/null``) to bypass loading of any user or
-    system config files. Note that these approaches can have unintended
-    consequences, as the user and system config files often define things
-    like the username and extensions that may be required to interface
-    with a repository.
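-
-    For example, to run with a single known-good file (the path is
-    illustrative)::
-
-        $ HGRCPATH=/path/to/known-good-hgrc hg id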
-
-Command-line Flags
-==================
-
-Mercurial's default command-line parser is designed for humans, and is not
-robust against malicious input. For instance, you can start a debugger by
-passing ``--debugger`` as an option value::
-
-    $ REV=--debugger sh -c 'hg log -r "$REV"'
-
-This happens because several command-line flags need to be scanned without
-using a concrete command table, which may be modified while loading repository
-settings and extensions.
-
-Since Mercurial 4.4.2, the parsing of such flags may be restricted by setting
-``HGPLAIN=+strictflags``. When this feature is enabled, all early options
-(e.g. ``-R/--repository``, ``--cwd``, ``--config``) must be specified first
-amongst the other global options, and cannot be injected to an arbitrary
-location::
-
-    $ HGPLAIN=+strictflags hg -R "$REPO" log -r "$REV"
-
-In earlier Mercurial versions where ``+strictflags`` isn't available, you
-can mitigate the issue by concatenating an option value with its flag::
-
-    $ hg log -r"$REV" --keyword="$KEYWORD"
-
-Consuming Command Output
-========================
-
-It is common for machines to need to parse the output of Mercurial
-commands for relevant data. This section describes the various
-techniques for doing so.
-
-Parsing Raw Command Output
---------------------------
-
-Likely the simplest and most effective solution for consuming command
-output is to simply invoke ``hg`` commands as you would as a user and
-parse their output.
-
-The output of many commands can easily be parsed with tools like
-``grep``, ``sed``, and ``awk``.
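-
-For example, a rough sketch that lists the files touched by the
-working directory parent (fragile for filenames containing spaces)::
-
-    $ hg status --change . | awk '{print $2}'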
-
-A potential downside with parsing command output is that the output
-of commands can change when Mercurial is upgraded. While Mercurial
-does generally strive for strong backwards compatibility, command
-output does occasionally change. Having tests for your automated
-interactions with ``hg`` commands is generally recommended, but is
-even more important when raw command output parsing is involved.
-
-Using Templates to Control Output
----------------------------------
-
-Many ``hg`` commands support templatized output via the
-``-T/--template`` argument. For more, see :hg:`help templates`.
-
-Templates are useful for explicitly controlling output so that
-you get exactly the data you want formatted how you want it. For
-example, ``log -T {node}\n`` can be used to print a newline
-delimited list of changeset nodes instead of a human-tailored
-output containing authors, dates, descriptions, etc.
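-
-A runnable form of that example::
-
-    $ hg log -r . -T '{node}\n'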
-
-.. tip::
-
-   If parsing raw command output is too complicated, consider
-   using templates to make your life easier.
-
-The ``-T/--template`` argument allows specifying pre-defined styles.
-Mercurial ships with the machine-readable styles ``cbor``, ``json``,
-and ``xml``, which provide CBOR, JSON, and XML output, respectively.
-These are useful for producing output that is machine readable as-is.
-
-(Mercurial 5.0 is required for CBOR style.)
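-
-For example::
-
-    $ hg log -r . -T json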
-
-.. important::
-
-   The ``json`` and ``xml`` styles are considered experimental. While
-   they may be attractive to use for easily obtaining machine-readable
-   output, their behavior may change in subsequent versions.
-
-   These styles may also exhibit unexpected results when dealing with
-   certain encodings. Mercurial treats things like filenames as a
-   series of bytes and normalizing certain byte sequences to JSON
-   or XML with certain encoding settings can lead to surprises.
-
-Command Server Output
----------------------
-
-If using the command server to interact with Mercurial, you are likely
-using an existing library/API that abstracts implementation details of
-the command server. If so, this interface layer may perform parsing for
-you, saving you the work of implementing it yourself.
-
-Output Verbosity
-----------------
-
-Commands often have varying output verbosity, even when machine
-readable styles are being used (e.g. ``-T json``). Adding
-``-v/--verbose`` and ``--debug`` to the command's arguments can
-increase the amount of data exposed by Mercurial.
-
-An alternate way to get the data you need is by explicitly specifying
-a template.
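-
-For example, adding ``--debug`` to a templated invocation can expose
-additional fields::
-
-    $ hg log -r . -T json --debug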
-
-Other Topics
-============
-
-revsets
-   Revision sets are a functional query language for selecting a set
-   of revisions. Think of it as SQL for Mercurial repositories. Revsets
-   are useful for querying repositories for specific data.
-
-   See :hg:`help revsets` for more.
-
-share extension
-   The ``share`` extension provides functionality for sharing
-   repository data across several working copies. It can even
-   automatically "pool" storage for logically related repositories when
-   cloning.
-
-   Configuring the ``share`` extension can lead to significant resource
-   utilization reduction, particularly around disk space and the
-   network. This is especially true for continuous integration (CI)
-   environments.
-
-   See :hg:`help -e share` for more.
--- a/mercurial/help/subrepos.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,171 +0,0 @@
-Subrepositories let you nest external repositories or projects into a
-parent Mercurial repository, and make commands operate on them as a
-group.
-
-Mercurial currently supports Mercurial, Git, and Subversion
-subrepositories.
-
-Subrepositories are made of three components:
-
-1. Nested repository checkouts. They can appear anywhere in the
-   parent working directory.
-
-2. Nested repository references. They are defined in ``.hgsub``, which
-   should be placed in the root of the working directory, and
-   tell where the subrepository checkouts come from. Mercurial
-   subrepositories are referenced like::
-
-     path/to/nested = https://example.com/nested/repo/path
-
-   Git and Subversion subrepos are also supported::
-
-     path/to/nested = [git]git://example.com/nested/repo/path
-     path/to/nested = [svn]https://example.com/nested/trunk/path
-
-   where ``path/to/nested`` is the checkout location relatively to the
-   parent Mercurial root, and ``https://example.com/nested/repo/path``
-   is the source repository path. The source can also reference a
-   filesystem path.
-
-   Note that ``.hgsub`` does not exist by default in Mercurial
-   repositories; you have to create and add it to the parent
-   repository before using subrepositories.
-
-3. Nested repository states. They are defined in ``.hgsubstate``, which
-   is placed in the root of the working directory and captures
-   whatever information is required to restore the subrepositories
-   to the state in which they were committed in a parent repository
-   changeset. Mercurial automatically records the nested
-   repositories' states when committing in the parent repository.
-
-   .. note::
-
-      The ``.hgsubstate`` file should not be edited manually.
-
-
-Adding a Subrepository
-======================
-
-If ``.hgsub`` does not exist, create it and add it to the parent
-repository. Clone or check out the external projects where you want
-them to live in the parent repository. Edit ``.hgsub`` and add the
-subrepository entry as described above. At this point, the
-subrepository is tracked and the next commit will record its state in
-``.hgsubstate`` and bind it to the committed changeset.
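-
-A minimal sketch of that workflow (paths and URL are illustrative)::
-
-  $ hg clone https://example.com/nested/repo/path path/to/nested
-  $ echo 'path/to/nested = https://example.com/nested/repo/path' >> .hgsub
-  $ hg add .hgsub
-  $ hg commit -m 'add nested as a subrepository'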
-
-Synchronizing a Subrepository
-=============================
-
-Subrepos do not automatically track the latest changeset of their
-sources. Instead, they are updated to the changeset that corresponds
-with the changeset checked out in the top-level repository. This is so
-developers always get a consistent set of compatible code and
-libraries when they update.
-
-Thus, updating subrepos is a manual process. Simply check out the
-target subrepo at the desired revision, test in the top-level repo, then
-commit in the parent repository to record the new combination.
-
-Deleting a Subrepository
-========================
-
-To remove a subrepository from the parent repository, delete its
-reference from ``.hgsub``, then remove its files.
-
-Interaction with Mercurial Commands
-===================================
-
-:add: add does not recurse in subrepos unless -S/--subrepos is
-    specified.  However, if you specify the full path of a file in a
-    subrepo, it will be added even without -S/--subrepos specified.
-    Subversion subrepositories are currently silently
-    ignored.
-
-:addremove: addremove does not recurse into subrepos unless
-    -S/--subrepos is specified.  However, if you specify the full
-    path of a directory in a subrepo, addremove will be performed on
-    it even without -S/--subrepos being specified.  Git and
-    Subversion subrepositories will print a warning and continue.
-
-:archive: archive does not recurse in subrepositories unless
-    -S/--subrepos is specified.
-
-:cat: Git subrepositories only support exact file matches.
-    Subversion subrepositories are currently ignored.
-
-:commit: commit creates a consistent snapshot of the state of the
-    entire project and its subrepositories. If any subrepositories
-    have been modified, Mercurial will abort.  Mercurial can be made
-    to instead commit all modified subrepositories by specifying
-    -S/--subrepos, or setting "ui.commitsubrepos=True" in a
-    configuration file (see :hg:`help config`).  After there are no
-    longer any modified subrepositories, it records their state and
-    finally commits it in the parent repository.  The --addremove
-    option also honors the -S/--subrepos option.  However, Git and
-    Subversion subrepositories will print a warning and abort.
-
-:diff: diff does not recurse in subrepos unless -S/--subrepos is
-    specified.  However, if you specify the full path of a file or
-    directory in a subrepo, it will be diffed even without
-    -S/--subrepos being specified.  Subversion subrepositories are
-    currently silently ignored.
-
-:files: files does not recurse into subrepos unless -S/--subrepos is
-    specified.  However, if you specify the full path of a file or
-    directory in a subrepo, it will be displayed even without
-    -S/--subrepos being specified.  Git and Subversion subrepositories
-    are currently silently ignored.
-
-:forget: forget currently only handles exact file matches in subrepos.
-    Git and Subversion subrepositories are currently silently ignored.
-
-:incoming: incoming does not recurse in subrepos unless -S/--subrepos
-    is specified. Git and Subversion subrepositories are currently
-    silently ignored.
-
-:outgoing: outgoing does not recurse in subrepos unless -S/--subrepos
-    is specified. Git and Subversion subrepositories are currently
-    silently ignored.
-
-:pull: pull is not recursive since it is not clear what to pull prior
-    to running :hg:`update`. Listing and retrieving all
-    subrepository changes referenced by the parent repository's pulled
-    changesets is expensive at best, and impossible in the Subversion
-    case.
-
-:push: Mercurial will automatically push all subrepositories first
-    when the parent repository is being pushed. This ensures new
-    subrepository changes are available when referenced by top-level
-    repositories.  Push is a no-op for Subversion subrepositories.
-
-:serve: serve does not recurse into subrepositories unless
-    -S/--subrepos is specified.  Git and Subversion subrepositories
-    are currently silently ignored.
-
-:status: status does not recurse into subrepositories unless
-    -S/--subrepos is specified. Subrepository changes are displayed as
-    regular Mercurial changes on the subrepository
-    elements. Subversion subrepositories are currently silently
-    ignored.
-
-:remove: remove does not recurse into subrepositories unless
-    -S/--subrepos is specified.  However, if you specify a file or
-    directory path in a subrepo, it will be removed even without
-    -S/--subrepos.  Git and Subversion subrepositories are currently
-    silently ignored.
-
-:update: update restores the subrepos to the state in which they were
-    originally committed in the target changeset. If the recorded
-    changeset is not available in the current subrepository, Mercurial
-    will pull it in first before updating.  This means that updating
-    can require network access when using subrepositories.
-
-Remapping Subrepository Sources
-=================================
-
-A subrepository source location may change during a project's life,
-invalidating references stored in the parent repository history. To
-fix this, rewriting rules can be defined in parent repository ``hgrc``
-file or in Mercurial configuration. See the ``[subpaths]`` section in
-hgrc(5) for more details.
-
--- a/mercurial/help/templates.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,215 +0,0 @@
-Mercurial allows you to customize output of commands through
-templates. You can either pass in a template or select an existing
-template-style from the command line, via the --template option.
-
-You can customize output for any "log-like" command: log,
-outgoing, incoming, tip, parents, and heads.
-
-Some built-in styles are packaged with Mercurial. These can be listed
-with :hg:`log --template list`. Example usage::
-
-    $ hg log -r1.0::1.1 --template changelog
-
-A template is a piece of text, with markup to invoke variable
-expansion::
-
-    $ hg log -r1 --template "{node}\n"
-    b56ce7b07c52de7d5fd79fb89701ea538af65746
-
-Keywords
-========
-
-Strings in curly braces are called keywords. The availability of
-keywords depends on the exact context of the templater. These
-keywords are usually available for templating a log-like command:
-
-.. keywordsmarker
-
-The "date" keyword does not produce human-readable output. If you
-want to use a date in your output, you can use a filter to process
-it. Filters are functions which return a string based on the input
-variable. Be sure to use the stringify filter first when you're
-applying a string-input filter to a list-like input variable.
-You can also use a chain of filters to get the desired output::
-
-   $ hg tip --template "{date|isodate}\n"
-   2008-08-21 18:22 +0000
-
-Filters
-=======
-
-List of filters:
-
-.. filtersmarker
-
-Note that a filter is nothing more than a function call, i.e.
-``expr|filter`` is equivalent to ``filter(expr)``.
-
-Functions
-=========
-
-In addition to filters, there are some basic built-in functions:
-
-.. functionsmarker
-
-Operators
-=========
-
-We provide a limited set of infix arithmetic operations on integers::
-
-  + for addition
-  - for subtraction
-  * for multiplication
-  / for floor division (division rounded to integer nearest -infinity)
-
-Division fulfills the law x = (x / y) * y + mod(x, y).
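-
-For example, ``{7 / -3}`` yields -3 (rounded towards negative
-infinity) and ``{mod(7, -3)}`` yields -2, so that
-(7 / -3) * -3 + mod(7, -3) == 7.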
-
-Also, for any expression that returns a list, there is a list operator::
-
-    expr % "{template}"
-
-As seen in the above example, ``{template}`` is interpreted as a template.
-To prevent it from being interpreted, you can use an escape character ``\{``
-or a raw string prefix, ``r'...'``.
-
-The dot operator can be used as a shorthand for accessing a sub item:
-
-- ``expr.member`` is roughly equivalent to ``expr % '{member}'`` if ``expr``
-  returns a non-list/dict. The returned value is not stringified.
-- ``dict.key`` is identical to ``get(dict, 'key')``.
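-
-For instance, ``{extras.branch}`` is identical to
-``{get(extras, 'branch')}``.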
-
-Aliases
-=======
-
-New keywords and functions can be defined in the ``templatealias`` section of
-a Mercurial configuration file::
-
-  <alias> = <definition>
-
-Arguments of the form `a1`, `a2`, etc. are substituted from the alias into
-the definition.
-
-For example,
-
-::
-
-  [templatealias]
-  r = rev
-  rn = "{r}:{node|short}"
-  leftpad(s, w) = pad(s, w, ' ', True)
-
-defines two symbol aliases, ``r`` and ``rn``, and a function alias
-``leftpad()``.
-
-It's also possible to specify complete template strings, using the
-``templates`` section. The syntax used is the general template string syntax.
-
-For example,
-
-::
-
-  [templates]
-  nodedate = "{node|short}: {date(date, "%Y-%m-%d")}\n"
-
-defines a template, ``nodedate``, which can be called like::
-
-  $ hg log -r . -Tnodedate
-
-A template defined in ``templates`` section can also be referenced from
-another template::
-
-  $ hg log -r . -T "{rev} {nodedate}"
-
-but be aware that the keywords cannot be overridden by templates. For example,
-a template defined as ``templates.rev`` cannot be referenced as ``{rev}``.
-
-A template defined in ``templates`` section may have sub templates which
-are inserted before/after/between items::
-
-  [templates]
-  myjson = ' {dict(rev, node|short)|json}'
-  myjson:docheader = '\{\n'
-  myjson:docfooter = '\n}\n'
-  myjson:separator = ',\n'
-
-Examples
-========
-
-Some sample command line templates:
-
-- Format lists, e.g. files::
-
-   $ hg log -r 0 --template "files:\n{files % '  {file}\n'}"
-
-- Join the list of files with a ", "::
-
-   $ hg log -r 0 --template "files: {join(files, ', ')}\n"
-
-- Join the list of files ending with ".py" with a ", "::
-
-   $ hg log -r 0 --template "pythonfiles: {join(files('**.py'), ', ')}\n"
-
-- Separate non-empty arguments by a " "::
-
-   $ hg log -r 0 --template "{separate(' ', node, bookmarks, tags)}\n"
-
-- Modify each line of a commit description::
-
-   $ hg log --template "{splitlines(desc) % '**** {line}\n'}"
-
-- Format date::
-
-   $ hg log -r 0 --template "{date(date, '%Y')}\n"
-
-- Display date in UTC::
-
-   $ hg log -r 0 --template "{localdate(date, 'UTC')|date}\n"
-
-- Output the description set to a fill-width of 30::
-
-   $ hg log -r 0 --template "{fill(desc, 30)}"
-
-- Use a conditional to test for the default branch::
-
-   $ hg log -r 0 --template "{ifeq(branch, 'default', 'on the main branch',
-   'on branch {branch}')}\n"
-
-- Append a newline if not empty::
-
-   $ hg tip --template "{if(author, '{author}\n')}"
-
-- Label the output for use with the color extension::
-
-   $ hg log -r 0 --template "{label('changeset.{phase}', node|short)}\n"
-
-- Invert the firstline filter, i.e. everything but the first line::
-
-   $ hg log -r 0 --template "{sub(r'^.*\n?\n?', '', desc)}\n"
-
-- Display the contents of the 'extra' field, one per line::
-
-   $ hg log -r 0 --template "{join(extras, '\n')}\n"
-
-- Mark the active bookmark with '*'::
-
-   $ hg log --template "{bookmarks % '{bookmark}{ifeq(bookmark, active, '*')} '}\n"
-
-- Find the previous release candidate tag, the distance and changes since the tag::
-
-   $ hg log -r . --template "{latesttag('re:^.*-rc$') % '{tag}, {changes}, {distance}'}\n"
-
-- Mark the working copy parent with '@'::
-
-   $ hg log --template "{ifcontains(rev, revset('.'), '@')}\n"
-
-- Show details of parent revisions::
-
-   $ hg log --template "{revset('parents(%d)', rev) % '{desc|firstline}\n'}"
-
-- Show only commit descriptions that start with "template"::
-
-   $ hg log --template "{startswith('template', firstline(desc))}\n"
-
-- Print the first word of each line of a commit message::
-
-   $ hg log --template "{word(0, desc)}\n"
--- a/mercurial/help/urls.txt	Thu Jan 09 14:19:20 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-Valid URLs are of the form::
-
-  local/filesystem/path[#revision]
-  file://local/filesystem/path[#revision]
-  http://[user[:pass]@]host[:port]/[path][#revision]
-  https://[user[:pass]@]host[:port]/[path][#revision]
-  ssh://[user@]host[:port]/[path][#revision]
-
-Paths in the local filesystem can either point to Mercurial
-repositories or to bundle files (as created by :hg:`bundle` or
-:hg:`incoming --bundle`). See also :hg:`help paths`.
-
-An optional identifier after # indicates a particular branch, tag, or
-changeset to use from the remote repository. See also :hg:`help
-revisions`.
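-
-For example (host and path are illustrative)::
-
-  hg clone https://example.com/repo/path#stable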
-
-Some features, such as pushing to http:// and https:// URLs, are only
-possible if the feature is explicitly enabled on the remote Mercurial
-server.
-
-Note that the security of HTTPS URLs depends on proper configuration of
-web.cacerts.
-
-Some notes about using SSH with Mercurial:
-
-- SSH requires an accessible shell account on the destination machine
-  and a copy of hg in the remote path or specified with remotecmd.
-- path is relative to the remote user's home directory by default. Use
-  an extra slash at the start of a path to specify an absolute path::
-
-    ssh://example.com//tmp/repository
-
-- Mercurial doesn't use its own compression via SSH; the right thing
-  to do is to configure it in your ~/.ssh/config, e.g.::
-
-    Host *.mylocalnetwork.example.com
-      Compression no
-    Host *
-      Compression yes
-
-  Alternatively specify "ssh -C" as your ssh command in your
-  configuration file or with the --ssh command line option.
-
-These URLs can all be stored in your configuration file with path
-aliases under the [paths] section like so::
-
-  [paths]
-  alias1 = URL1
-  alias2 = URL2
-  ...
-
-You can then use the alias for any command that uses a URL (for
-example :hg:`pull alias1` will be treated as :hg:`pull URL1`).
-
-Two path aliases are special because they are used as defaults when
-you do not provide the URL to a command:
-
-default:
-  When you create a repository with hg clone, the clone command saves
-  the location of the source repository as the new repository's
-  'default' path. This is then used when you omit the path from push- and
-  pull-like commands (including incoming and outgoing).
-
-default-push:
-  The push command will look for a path named 'default-push', and
-  prefer it over 'default' if both are defined.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/bundlespec.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,84 @@
+Mercurial supports generating standalone "bundle" files that hold repository
+data. These "bundles" are typically saved locally and used later or exchanged
+between different repositories, possibly on different machines. Example
+commands using bundles are :hg:`bundle` and :hg:`unbundle`.
+
+Generation of bundle files is controlled by a "bundle specification"
+("bundlespec") string. This string tells the bundle generation process how
+to create the bundle.
+
+A "bundlespec" string is composed of the following elements:
+
+type
+    A string denoting the bundle format to use.
+
+compression
+    Denotes the compression engine to use when compressing the raw
+    bundle data.
+
+parameters
+    Arbitrary key-value parameters to further control bundle generation.
+
+A "bundlespec" string has the following formats:
+
+<type>
+    The literal bundle format string is used.
+
+<compression>-<type>
+    The compression engine and format are delimited by a hyphen (``-``).
+
+Optional parameters follow the ``<type>``. Parameters are URI escaped
+``key=value`` pairs. Each pair is delimited by a semicolon (``;``). The
+first parameter begins after a ``;`` immediately following the ``<type>``
+value.
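+
+For example, ``zstd-v2;key=value`` would name a ``v2`` bundle with
+zstandard compression and one URI-escaped parameter (the ``key=value``
+pair here is purely illustrative).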
+
+Available Types
+===============
+
+The following bundle <type> strings are available:
+
+v1
+    Produces a legacy "changegroup" version 1 bundle.
+
+    This format is compatible with nearly all Mercurial clients because it is
+    the oldest. However, it has some limitations, which is why it is no longer
+    the default for new repositories.
+
+    ``v1`` bundles can be used with modern repositories using the "generaldelta"
+    storage format. However, it may take longer to produce the bundle and the
+    resulting bundle may be significantly larger than a ``v2`` bundle.
+
+    ``v1`` bundles can only use the ``gzip``, ``bzip2``, and ``none`` compression
+    formats.
+
+v2
+    Produces a version 2 bundle.
+
+    Version 2 bundles use an extensible format that can store additional
+    repository data (such as bookmarks and phases information) and they can
+    store data more efficiently, resulting in smaller bundles.
+
+    Version 2 bundles can also use modern compression engines, such as
+    ``zstd``, making them faster to compress and often smaller.
+
+Available Compression Engines
+=============================
+
+The following bundle <compression> engines can be used:
+
+.. bundlecompressionmarker
+
+Examples
+========
+
+``v2``
+    Produce a ``v2`` bundle using default options, including compression.
+
+``none-v1``
+    Produce a ``v1`` bundle with no compression.
+
+``zstd-v2``
+    Produce a ``v2`` bundle with zstandard compression using default
+    settings.
+
+``zstd-v1``
+    This errors because ``zstd`` is not supported for ``v1`` types.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/color.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,149 @@
+Mercurial colorizes output from several commands.
+
+For example, the diff command shows additions in green and deletions
+in red, while the status command shows modified files in magenta. Many
+other commands have analogous colors. It is possible to customize
+these colors.
+
+To enable color (default) whenever possible use::
+
+  [ui]
+  color = yes
+
+To disable color use::
+
+  [ui]
+  color = no
+
+See :hg:`help config.ui.color` for details.
+
+.. container:: windows
+
+  The default pager on Windows does not support color, so enabling the pager
+  will effectively disable color.  See :hg:`help config.ui.paginate` to disable
+  the pager.  Alternatively, MSYS and Cygwin shells provide `less` as a pager,
+  which can be configured to support ANSI color mode.  Windows 10 natively
+  supports ANSI color mode.
+
+Mode
+====
+
+Mercurial can use various systems to display color. The supported modes are
+``ansi``, ``win32``, and ``terminfo``.  See :hg:`help config.color` for details
+about how to control the mode.
+
+Effects
+=======
+
+Other effects in addition to color, like bold and underlined text, are
+also available. By default, the terminfo database is used to find the
+terminal codes used to change color and effect.  If terminfo is not
+available, then effects are rendered with the ECMA-48 SGR control
+function (aka ANSI escape codes).
+
+The available effects in terminfo mode are 'blink', 'bold', 'dim',
+'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
+ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
+'underline'.  How each is rendered depends on the terminal emulator.
+Some may not be available for a given terminal type, and will be
+silently ignored.
+
+If the terminfo entry for your terminal is missing codes for an effect
+or has the wrong codes, you can add or override those codes in your
+configuration::
+
+  [color]
+  terminfo.dim = \E[2m
+
+where '\E' is substituted with an escape character.
+
+Labels
+======
+
+Text receives color effects depending on the labels that it has. Many
+default Mercurial commands emit labelled text. You can also define
+your own labels in templates using the label function, see :hg:`help
+templates`. A single portion of text may have more than one label. In
+that case, effects given to the last label will override any other
+effects. This includes the special "none" effect, which nullifies
+other effects.
+
+Labels are normally invisible. In order to see these labels and their
+position in the text, use the global --color=debug option. The same
+anchor text may be associated with multiple labels, e.g.::
+
+  [log.changeset changeset.secret|changeset:   22611:6f0a53c8f587]
+
+The following are the default effects for some default labels. Default
+effects may be overridden from your configuration file::
+
+  [color]
+  status.modified = blue bold underline red_background
+  status.added = green bold
+  status.removed = red bold blue_background
+  status.deleted = cyan bold underline
+  status.unknown = magenta bold underline
+  status.ignored = black bold
+
+  # 'none' turns off all effects
+  status.clean = none
+  status.copied = none
+
+  qseries.applied = blue bold underline
+  qseries.unapplied = black bold
+  qseries.missing = red bold
+
+  diff.diffline = bold
+  diff.extended = cyan bold
+  diff.file_a = red bold
+  diff.file_b = green bold
+  diff.hunk = magenta
+  diff.deleted = red
+  diff.inserted = green
+  diff.changed = white
+  diff.tab =
+  diff.trailingwhitespace = bold red_background
+
+  # Blank so it inherits the style of the surrounding label
+  changeset.public =
+  changeset.draft =
+  changeset.secret =
+
+  resolve.unresolved = red bold
+  resolve.resolved = green bold
+
+  bookmarks.active = green
+
+  branches.active = none
+  branches.closed = black bold
+  branches.current = green
+  branches.inactive = none
+
+  tags.normal = green
+  tags.local = black bold
+
+  rebase.rebased = blue
+  rebase.remaining = red bold
+
+  shelve.age = cyan
+  shelve.newest = green bold
+  shelve.name = blue bold
+
+  histedit.remaining = red bold
+
+Custom colors
+=============
+
+Because there are only eight standard colors, Mercurial allows you
+to define color names for other color slots which might be available
+for your terminal type, assuming terminfo mode.  For instance::
+
+  color.brightblue = 12
+  color.pink = 207
+  color.orange = 202
+
+to set 'brightblue' to color slot 12 (useful for 16-color terminals
+that have brighter colors defined in the upper eight), and 'pink' and
+'orange' to colors in 256-color xterm's default color cube.  These
+defined colors may then be used as any of the pre-defined eight,
+including appending '_background' to set the background to that color.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/common.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,8 @@
+.. Common link and substitution definitions.
+
+.. |hg(1)| replace:: **hg**\ (1)
+.. _hg(1): hg.1.html
+.. |hgrc(5)| replace:: **hgrc**\ (5)
+.. _hgrc(5): hgrc.5.html
+.. |hgignore(5)| replace:: **hgignore**\ (5)
+.. _hgignore(5): hgignore.5.html
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/config.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,2877 @@
+The Mercurial system uses a set of configuration files to control
+aspects of its behavior.
+
+Troubleshooting
+===============
+
+If you're having problems with your configuration,
+:hg:`config --debug` can help you understand what is introducing
+a setting into your environment.
+
+See :hg:`help config.syntax` and :hg:`help config.files`
+for information about how and where to override things.
+
+Structure
+=========
+
+The configuration files use a simple ini-file format. A configuration
+file consists of sections, led by a ``[section]`` header and followed
+by ``name = value`` entries::
+
+  [ui]
+  username = Firstname Lastname <firstname.lastname@example.net>
+  verbose = True
+
+The above entries will be referred to as ``ui.username`` and
+``ui.verbose``, respectively. See :hg:`help config.syntax`.
+
+Files
+=====
+
+Mercurial reads configuration data from several files, if they exist.
+These files do not exist by default and you will have to create the
+appropriate configuration files yourself:
+
+Local configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
+
+Global configuration like the username setting is typically put into:
+
+.. container:: windows
+
+  - ``%USERPROFILE%\mercurial.ini`` (on Windows)
+
+.. container:: unix.plan9
+
+  - ``$HOME/.hgrc`` (on Unix, Plan9)
+
+The names of these files depend on the system on which Mercurial is
+installed. ``*.rc`` files from a single directory are read in
+alphabetical order, later ones overriding earlier ones. Where multiple
+paths are given below, settings from earlier paths override later
+ones.
+
+.. container:: verbose.unix
+
+  On Unix, the following files are consulted:
+
+  - ``<repo>/.hg/hgrc`` (per-repository)
+  - ``$HOME/.hgrc`` (per-user)
+  - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
+  - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
+  - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
+  - ``/etc/mercurial/hgrc`` (per-system)
+  - ``/etc/mercurial/hgrc.d/*.rc`` (per-system)
+  - ``<internal>/*.rc`` (defaults)
+
+.. container:: verbose.windows
+
+  On Windows, the following files are consulted:
+
+  - ``<repo>/.hg/hgrc`` (per-repository)
+  - ``%USERPROFILE%\.hgrc`` (per-user)
+  - ``%USERPROFILE%\Mercurial.ini`` (per-user)
+  - ``%HOME%\.hgrc`` (per-user)
+  - ``%HOME%\Mercurial.ini`` (per-user)
+  - ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (per-system)
+  - ``<install-dir>\hgrc.d\*.rc`` (per-installation)
+  - ``<install-dir>\Mercurial.ini`` (per-installation)
+  - ``%PROGRAMDATA%\Mercurial\hgrc`` (per-system)
+  - ``%PROGRAMDATA%\Mercurial\Mercurial.ini`` (per-system)
+  - ``%PROGRAMDATA%\Mercurial\hgrc.d\*.rc`` (per-system)
+  - ``<internal>/*.rc`` (defaults)
+
+  .. note::
+
+   The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
+   is used when running 32-bit Python on 64-bit Windows.
+
+.. container:: verbose.plan9
+
+  On Plan9, the following files are consulted:
+
+  - ``<repo>/.hg/hgrc`` (per-repository)
+  - ``$home/lib/hgrc`` (per-user)
+  - ``<install-root>/lib/mercurial/hgrc`` (per-installation)
+  - ``<install-root>/lib/mercurial/hgrc.d/*.rc`` (per-installation)
+  - ``/lib/mercurial/hgrc`` (per-system)
+  - ``/lib/mercurial/hgrc.d/*.rc`` (per-system)
+  - ``<internal>/*.rc`` (defaults)
+
+Per-repository configuration options only apply in a
+particular repository. This file is not version-controlled, and
+will not get transferred during a "clone" operation. Options in
+this file override options in all other configuration files.
+
+.. container:: unix.plan9
+
+  On Plan 9 and Unix, most of this file will be ignored if it doesn't
+  belong to a trusted user or to a trusted group. See
+  :hg:`help config.trusted` for more details.
+
+Per-user configuration file(s) are for the user running Mercurial.  Options
+in these files apply to all Mercurial commands executed by this user in any
+directory. Options in these files override per-system and per-installation
+options.
+
+Per-installation configuration files are searched for in the
+directory where Mercurial is installed. ``<install-root>`` is the
+parent directory of the **hg** executable (or symlink) being run.
+
+.. container:: unix.plan9
+
+  For example, if installed in ``/shared/tools/bin/hg``, Mercurial
+  will look in ``/shared/tools/etc/mercurial/hgrc``. Options in these
+  files apply to all Mercurial commands executed by any user in any
+  directory.
+
+Per-installation configuration files are for the system on
+which Mercurial is running. Options in these files apply to all
+Mercurial commands executed by any user in any directory. Registry
+keys contain PATH-like strings, every part of which must reference
+a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
+be read.  Mercurial checks each of these locations in the specified
+order until one or more configuration files are detected.
+
+Per-system configuration files are for the system on which Mercurial
+is running. Options in these files apply to all Mercurial commands
+executed by any user in any directory. Options in these files
+override per-installation options.
+
+Mercurial comes with some default configuration. The default configuration
+files are installed with Mercurial and will be overwritten on upgrades. Default
+configuration files should never be edited by users or administrators but can
+be overridden in other configuration files. So far the directory only contains
+merge tool configuration but packagers can also put other default configuration
+there.
+
+Syntax
+======
+
+A configuration file consists of sections, led by a ``[section]`` header
+and followed by ``name = value`` entries (sometimes called
+``configuration keys``)::
+
+    [spam]
+    eggs=ham
+    green=
+       eggs
+
+Each line contains one entry. If the lines that follow are indented,
+they are treated as continuations of that entry. Leading whitespace is
+removed from values. Empty lines are skipped. Lines beginning with
+``#`` or ``;`` are ignored and may be used to provide comments.
+
+Configuration keys can be set multiple times, in which case Mercurial
+will use the value that was configured last. As an example::
+
+    [spam]
+    eggs=large
+    ham=serrano
+    eggs=small
+
+This would set the configuration key named ``eggs`` to ``small``.
+
+It is also possible to define a section multiple times. A section can
+be redefined in the same and/or in different configuration files. For
+example::
+
+    [foo]
+    eggs=large
+    ham=serrano
+    eggs=small
+
+    [bar]
+    eggs=ham
+    green=
+       eggs
+
+    [foo]
+    ham=prosciutto
+    eggs=medium
+    bread=toasted
+
+This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
+of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
+respectively. As you can see, the only thing that matters is the last
+value that was set for each of the configuration keys.
+
+If a configuration key is set multiple times in different
+configuration files the final value will depend on the order in which
+the different configuration files are read, with settings from earlier
+paths overriding later ones as described in the ``Files`` section
+above.
+
+A line of the form ``%include file`` will include ``file`` into the
+current configuration file. The inclusion is recursive, which means
+that included files can include other files. Filenames are relative to
+the configuration file in which the ``%include`` directive is found.
+Environment variables and ``~user`` constructs are expanded in
+``file``. This lets you do something like::
+
+  %include ~/.hgrc.d/$HOST.rc
+
+to include a different configuration file on each computer you use.
+
+A line with ``%unset name`` will remove ``name`` from the current
+section, if it has been set previously.
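+
+For example, to drop a ``verbose`` setting inherited from an earlier
+configuration file::
+
+    [ui]
+    %unset verbose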
+
+The values are either free-form text strings, lists of text strings,
+or Boolean values. Boolean values can be set to true using any of "1",
+"yes", "true", or "on" and to false using "0", "no", "false", or "off"
+(all case insensitive).
+
+List values are separated by whitespace or comma, except when values are
+placed in double quotation marks::
+
+  allow_read = "John Doe, PhD", brian, betty
+
+Quotation marks can be escaped by prefixing them with a backslash. Only
+quotation marks at the beginning of a word is counted as a quotation
+(e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
+
+Sections
+========
+
+This section describes the different sections that may appear in a
+Mercurial configuration file, the purpose of each section, its possible
+keys, and their possible values.
+
+``alias``
+---------
+
+Defines command aliases.
+
+Aliases allow you to define your own commands in terms of other
+commands (or aliases), optionally including arguments. Positional
+arguments in the form of ``$1``, ``$2``, etc. in the alias definition
+are expanded by Mercurial before execution. Positional arguments not
+already used by ``$N`` in the definition are put at the end of the
+command to be executed.
+
+Alias definitions consist of lines of the form::
+
+    <alias> = <command> [<argument>]...
+
+For example, this definition::
+
+    latest = log --limit 5
+
+creates a new command ``latest`` that shows only the five most recent
+changesets. You can define subsequent aliases using earlier ones::
+
+    stable5 = latest -b stable
+
+.. note::
+
+   It is possible to create aliases with the same names as
+   existing commands, which will then override the original
+   definitions. This is almost always a bad idea!
+
+An alias can start with an exclamation point (``!``) to make it a
+shell alias. A shell alias is executed with the shell and will let you
+run arbitrary commands. As an example, ::
+
+   echo = !echo $@
+
+will let you do ``hg echo foo`` to have ``foo`` printed in your
+terminal. A better example might be::
+
+   purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
+
+which will make ``hg purge`` delete all unknown files in the
+repository in the same manner as the purge extension.
+
+Positional arguments like ``$1``, ``$2``, etc. in the alias definition
+expand to the command arguments. Unmatched arguments are
+removed. ``$0`` expands to the alias name and ``$@`` expands to all
+arguments separated by a space. ``"$@"`` (with quotes) expands to all
+arguments quoted individually and separated by a space. These expansions
+happen before the command is passed to the shell.
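+
+For example, with a shell alias like ``first = !echo $1`` (a sketch),
+running ``hg first foo bar`` prints ``foo``.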
+
+Shell aliases are executed in an environment where ``$HG`` expands to
+the path of the Mercurial that was used to execute the alias. This is
+useful when you want to call further Mercurial commands in a shell
+alias, as was done above for the purge alias. In addition,
+``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
+echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
+
+.. note::
+
+   Some global configuration options such as ``-R`` are
+   processed before shell aliases and will thus not be passed to
+   aliases.
+
+
+``annotate``
+------------
+
+Settings used when displaying file annotations. All values are
+Booleans and default to False. See :hg:`help config.diff` for
+related options for the diff command.
+
+``ignorews``
+    Ignore white space when comparing lines.
+
+``ignorewseol``
+    Ignore white space at the end of a line when comparing lines.
+
+``ignorewsamount``
+    Ignore changes in the amount of white space.
+
+``ignoreblanklines``
+    Ignore changes whose lines are all blank.
+
+
+``auth``
+--------
+
+Authentication credentials and other authentication-like configuration
+for HTTP connections. This section allows you to store usernames and
+passwords for use when logging *into* HTTP servers. See
+:hg:`help config.web` if you want to configure *who* can login to
+your HTTP server.
+
+The following options apply to all hosts.
+
+``cookiefile``
+    Path to a file containing HTTP cookie lines. Cookies matching a
+    host will be sent automatically.
+
+    The file format uses the Mozilla cookies.txt format, which defines cookies
+    on their own lines. Each line contains 7 fields delimited by the tab
+    character (domain, is_domain_cookie, path, is_secure, expires, name,
+    value). For more info, do an Internet search for "Netscape cookies.txt
+    format."
+
+    Note: the cookies parser does not handle port numbers on domains. You
+    will need to remove ports from the domain for the cookie to be recognized.
+    This could result in a cookie being disclosed to an unwanted server.
+
+    The cookies file is read-only.
+
+Other options in this section are grouped by name and have the following
+format::
+
+    <name>.<argument> = <value>
+
+where ``<name>`` is used to group arguments into authentication
+entries. Example::
+
+    foo.prefix = hg.intevation.de/mercurial
+    foo.username = foo
+    foo.password = bar
+    foo.schemes = http https
+
+    bar.prefix = secure.example.org
+    bar.key = path/to/file.key
+    bar.cert = path/to/file.cert
+    bar.schemes = https
+
+Supported arguments:
+
+``prefix``
+    Either ``*`` or a URI prefix with or without the scheme part.
+    The authentication entry with the longest matching prefix is used
+    (where ``*`` matches everything and counts as a match of length
+    1). If the prefix doesn't include a scheme, the match is performed
+    against the URI with its scheme stripped as well, and the schemes
+    argument, q.v., is then consulted.
+
+``username``
+    Optional. Username to authenticate with. If not given, and the
+    remote site requires basic or digest authentication, the user will
+    be prompted for it. Environment variables are expanded in the
+    username, letting you do ``foo.username = $USER``. If the URI
+    includes a username, only ``[auth]`` entries with a matching
+    username or without a username will be considered.
+
+``password``
+    Optional. Password to authenticate with. If not given, and the
+    remote site requires basic or digest authentication, the user
+    will be prompted for it.
+
+``key``
+    Optional. PEM encoded client certificate key file. Environment
+    variables are expanded in the filename.
+
+``cert``
+    Optional. PEM encoded client certificate chain file. Environment
+    variables are expanded in the filename.
+
+``schemes``
+    Optional. Space separated list of URI schemes to use this
+    authentication entry with. Only used if the prefix doesn't include
+    a scheme. Supported schemes are http and https. They will match
+    static-http and static-https respectively, as well.
+    (default: https)
+
+If no suitable authentication entry is found, the user is prompted
+for credentials as usual if required by the remote.
+
+``color``
+---------
+
+Configure the Mercurial color mode. For details about how to define your custom
+effect and style see :hg:`help color`.
+
+``mode``
+    String: control the method used to output color. One of ``auto``, ``ansi``,
+    ``win32``, ``terminfo`` or ``debug``. In auto mode, Mercurial will
+    use ANSI mode by default (or win32 mode prior to Windows 10) if it detects a
+    terminal. Any invalid value will disable color.
+
+``pagermode``
+    String: optional override of ``color.mode`` used with pager.
+
+    On some systems, terminfo mode may cause problems when using
+    color with ``less -R`` as a pager program. less with the -R option
+    will only display ECMA-48 color codes, and terminfo mode may sometimes
+    emit codes that less doesn't understand. You can work around this by
+    either using ansi mode (or auto mode), or by using less -r (which will
+    pass through all terminal control codes, not just color control
+    codes).
+
+    On some systems (such as MSYS in Windows), the terminal may support
+    a different color mode than the pager program.
+
+``commands``
+------------
+
+``commit.post-status``
+    Show status of files in the working directory after successful commit.
+    (default: False)
+
+``merge.require-rev``
+    Require that the revision to merge the current commit with be specified on
+    the command line. If this is enabled and a revision is not specified, the
+    command aborts.
+    (default: False)
+
+``push.require-revs``
+    Require revisions to push be specified using one or more mechanisms such as
+    specifying them positionally on the command line, using ``-r``, ``-b``,
+    and/or ``-B`` on the command line, or using ``paths.<path>:pushrev`` in the
+    configuration. If this is enabled and revisions are not specified, the
+    command aborts.
+    (default: False)
+
+``resolve.confirm``
+    Confirm before performing action if no filename is passed.
+    (default: False)
+
+``resolve.explicit-re-merge``
+    Require uses of ``hg resolve`` to specify which action it should perform,
+    instead of re-merging files by default.
+    (default: False)
+
+``resolve.mark-check``
+    Determines what level of checking :hg:`resolve --mark` will perform before
+    marking files as resolved. Valid values are ``none``, ``warn``, and
+    ``abort``. ``warn`` will output a warning listing the file(s) that still
+    have conflict markers in them, but will still mark everything resolved.
+    ``abort`` will output the same warning but will not mark things as resolved.
+    If ``--all`` is passed and this is set to ``abort``, only a warning will be
+    shown (an error will not be raised).
+    (default: ``none``)
+
+``status.relative``
+    Make paths in :hg:`status` output relative to the current directory.
+    (default: False)
+
+``status.terse``
+    Default value for the ``--terse`` flag, which condenses status output.
+    (default: empty)
+
+``update.check``
+    Determines what level of checking :hg:`update` will perform before moving
+    to a destination revision. Valid values are ``abort``, ``none``,
+    ``linear``, and ``noconflict``. ``abort`` always fails if the working
+    directory has uncommitted changes. ``none`` performs no checking, and may
+    result in a merge with uncommitted changes. ``linear`` allows any update
+    as long as it follows a straight line in the revision history, and may
+    trigger a merge with uncommitted changes. ``noconflict`` will allow any
+    update which would not trigger a merge with uncommitted changes, if any
+    are present.
+    (default: ``linear``)
+
+``update.requiredest``
+    Require that the user pass a destination when running :hg:`update`.
+    For example, :hg:`update .::` will be allowed, but a plain :hg:`update`
+    will be disallowed.
+    (default: False)
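+
+For example, a ``[commands]`` snippet enabling a few of the checks above
+(the chosen values are illustrative)::
+
+  [commands]
+  commit.post-status = True
+  merge.require-rev = True
+  update.check = noconflict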
+
+``committemplate``
+------------------
+
+``changeset``
+    String: configuration in this section is used as the template to
+    customize the text shown in the editor when committing.
+
+In addition to pre-defined template keywords, the commit-log-specific
+keyword below can be used for customization:
+
+``extramsg``
+    String: Extra message (typically 'Leave message empty to abort
+    commit.'). This may be changed by some commands or extensions.
+
+For example, the template configuration below shows the same text as
+the one shown by default::
+
+    [committemplate]
+    changeset = {desc}\n\n
+        HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+        HG: {extramsg}
+        HG: --
+        HG: user: {author}\n{ifeq(p2rev, "-1", "",
+       "HG: branch merge\n")
+       }HG: branch '{branch}'\n{if(activebookmark,
+       "HG: bookmark '{activebookmark}'\n")   }{subrepos %
+       "HG: subrepo {subrepo}\n"              }{file_adds %
+       "HG: added {file}\n"                   }{file_mods %
+       "HG: changed {file}\n"                 }{file_dels %
+       "HG: removed {file}\n"                 }{if(files, "",
+       "HG: no files changed\n")}
+
+``diff()``
+    String: show the diff (see :hg:`help templates` for details)
+
+Sometimes it is helpful to show the diff of the changeset in the editor without
+having to prefix 'HG: ' to each line so that highlighting works correctly. For
+this, Mercurial provides a special string which will ignore everything below
+it::
+
+     HG: ------------------------ >8 ------------------------
+
+For example, the template configuration below will show the diff below the
+extra message::
+
+    [committemplate]
+    changeset = {desc}\n\n
+        HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+        HG: {extramsg}
+        HG: ------------------------ >8 ------------------------
+        HG: Do not touch the line above.
+        HG: Everything below will be removed.
+        {diff()}
+
+.. note::
+
+   For some problematic encodings (see :hg:`help win32mbcs` for
+   details), this customization should be configured carefully, to
+   avoid showing broken characters.
+
+   For example, if a multibyte character ending with backslash (0x5c) is
+   followed by the ASCII character 'n' in the customized template,
+   the sequence of backslash and 'n' is treated as line-feed unexpectedly
+   (and the multibyte character is broken, too).
+
+Customized template is used for commands below (``--edit`` may be
+required):
+
+- :hg:`backout`
+- :hg:`commit`
+- :hg:`fetch` (for merge commit only)
+- :hg:`graft`
+- :hg:`histedit`
+- :hg:`import`
+- :hg:`qfold`, :hg:`qnew` and :hg:`qrefresh`
+- :hg:`rebase`
+- :hg:`shelve`
+- :hg:`sign`
+- :hg:`tag`
+- :hg:`transplant`
+
+Configuring items below instead of ``changeset`` allows showing a
+customized message only for specific actions, or showing different
+messages for each action.
+
+- ``changeset.backout`` for :hg:`backout`
+- ``changeset.commit.amend.merge`` for :hg:`commit --amend` on merges
+- ``changeset.commit.amend.normal`` for :hg:`commit --amend` on other
+- ``changeset.commit.normal.merge`` for :hg:`commit` on merges
+- ``changeset.commit.normal.normal`` for :hg:`commit` on other
+- ``changeset.fetch`` for :hg:`fetch` (implying merge commit)
+- ``changeset.gpg.sign`` for :hg:`sign`
+- ``changeset.graft`` for :hg:`graft`
+- ``changeset.histedit.edit`` for ``edit`` of :hg:`histedit`
+- ``changeset.histedit.fold`` for ``fold`` of :hg:`histedit`
+- ``changeset.histedit.mess`` for ``mess`` of :hg:`histedit`
+- ``changeset.histedit.pick`` for ``pick`` of :hg:`histedit`
+- ``changeset.import.bypass`` for :hg:`import --bypass`
+- ``changeset.import.normal.merge`` for :hg:`import` on merges
+- ``changeset.import.normal.normal`` for :hg:`import` on other
+- ``changeset.mq.qnew`` for :hg:`qnew`
+- ``changeset.mq.qfold`` for :hg:`qfold`
+- ``changeset.mq.qrefresh`` for :hg:`qrefresh`
+- ``changeset.rebase.collapse`` for :hg:`rebase --collapse`
+- ``changeset.rebase.merge`` for :hg:`rebase` on merges
+- ``changeset.rebase.normal`` for :hg:`rebase` on other
+- ``changeset.shelve.shelve`` for :hg:`shelve`
+- ``changeset.tag.add`` for :hg:`tag` without ``--remove``
+- ``changeset.tag.remove`` for :hg:`tag --remove`
+- ``changeset.transplant.merge`` for :hg:`transplant` on merges
+- ``changeset.transplant.normal`` for :hg:`transplant` on other
+
+These dot-separated lists of names are treated hierarchically.
+For example, ``changeset.tag.remove`` customizes the commit message
+only for :hg:`tag --remove`, while ``changeset.tag`` customizes the
+commit message for :hg:`tag` regardless of the ``--remove`` option.
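+
+For example, the following sketch (the message texts are illustrative)
+customizes the message for all :hg:`tag` invocations, with a more
+specific override for :hg:`tag --remove`::
+
+    [committemplate]
+    changeset.tag = {desc}\n\nHG: Enter a tag commit message.
+    changeset.tag.remove = {desc}\n\nHG: Enter a tag removal message.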
+
+When the external editor is invoked for a commit, the corresponding
+dot-separated list of names without the ``changeset.`` prefix
+(e.g. ``commit.normal.normal``) is in the ``HGEDITFORM`` environment
+variable.
+
+In this section, items other than ``changeset`` can be referenced from
+other items. For example, the configuration below, which lists committed
+files, can be referenced as ``{listupfiles}``::
+
+    [committemplate]
+    listupfiles = {file_adds %
+       "HG: added {file}\n"     }{file_mods %
+       "HG: changed {file}\n"   }{file_dels %
+       "HG: removed {file}\n"   }{if(files, "",
+       "HG: no files changed\n")}
+
+``decode/encode``
+-----------------
+
+Filters for transforming files on checkout/checkin. This would
+typically be used for newline processing or other
+localization/canonicalization of files.
+
+Filters consist of a filter pattern followed by a filter command.
+Filter patterns are globs by default, rooted at the repository root.
+For example, to match any file ending in ``.txt`` in the root
+directory only, use the pattern ``*.txt``. To match any file ending
+in ``.c`` anywhere in the repository, use the pattern ``**.c``.
+For each file only the first matching filter applies.
+
+The filter command can start with a specifier, either ``pipe:`` or
+``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
+
+A ``pipe:`` command must accept data on stdin and return the transformed
+data on stdout.
+
+Pipe example::
+
+  [encode]
+  # uncompress gzip files on checkin to improve delta compression
+  # note: not necessarily a good idea, just an example
+  *.gz = pipe: gunzip
+
+  [decode]
+  # recompress gzip files when writing them to the working dir (we
+  # can safely omit "pipe:", because it's the default)
+  *.gz = gzip
+
+A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
+with the name of a temporary file that contains the data to be
+filtered by the command. The string ``OUTFILE`` is replaced with the name
+of an empty temporary file, where the filtered data must be written by
+the command.
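+
+Tempfile example (``unix2dos``/``dos2unix`` are illustrative commands;
+any program that reads ``INFILE`` and writes ``OUTFILE`` will do)::
+
+  [decode]
+  # convert text files to the Windows line ending convention on checkout
+  **.txt = tempfile: unix2dos -n INFILE OUTFILE
+
+  [encode]
+  # convert them back to Unix line endings on checkin
+  **.txt = tempfile: dos2unix -n INFILE OUTFILE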
+
+.. container:: windows
+
+   .. note::
+
+     The tempfile mechanism is recommended for Windows systems,
+     where the standard shell I/O redirection operators often have
+     strange effects and may corrupt the contents of your files.
+
+This filter mechanism is used internally by the ``eol`` extension to
+translate line ending characters between Windows (CRLF) and Unix (LF)
+format. We suggest you use the ``eol`` extension for convenience.
+
+
+``defaults``
+------------
+
+(defaults are deprecated. Don't use them. Use aliases instead.)
+
+Use the ``[defaults]`` section to define command defaults, i.e. the
+default options/arguments to pass to the specified commands.
+
+The following example makes :hg:`log` run in verbose mode, and
+:hg:`status` show only the modified files, by default::
+
+  [defaults]
+  log = -v
+  status = -m
+
+The actual commands, instead of their aliases, must be used when
+defining command defaults. The command defaults will also be applied
+to the aliases of the commands defined.
+
+
+``diff``
+--------
+
+Settings used when displaying diffs. Everything except for ``unified``
+is a Boolean and defaults to False. See :hg:`help config.annotate`
+for related options for the annotate command.
+
+``git``
+    Use git extended diff format.
+
+``nobinary``
+    Omit git binary patches.
+
+``nodates``
+    Don't include dates in diff headers.
+
+``noprefix``
+    Omit 'a/' and 'b/' prefixes from filenames. Ignored in plain mode.
+
+``showfunc``
+    Show which function each change is in.
+
+``ignorews``
+    Ignore white space when comparing lines.
+
+``ignorewsamount``
+    Ignore changes in the amount of white space.
+
+``ignoreblanklines``
+    Ignore changes whose lines are all blank.
+
+``unified``
+    Number of lines of context to show.
+
+``word-diff``
+    Highlight changed words.
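+
+Example (values are illustrative)::
+
+  [diff]
+  git = True
+  showfunc = True
+  unified = 8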
+
+``email``
+---------
+
+Settings for extensions that send email messages.
+
+``from``
+    Optional. Email address to use in "From" header and SMTP envelope
+    of outgoing messages.
+
+``to``
+    Optional. Comma-separated list of recipients' email addresses.
+
+``cc``
+    Optional. Comma-separated list of carbon copy recipients'
+    email addresses.
+
+``bcc``
+    Optional. Comma-separated list of blind carbon copy recipients'
+    email addresses.
+
+``method``
+    Optional. Method to use to send email messages. If value is ``smtp``
+    (default), use SMTP (see the ``[smtp]`` section for configuration).
+    Otherwise, use as name of program to run that acts like sendmail
+    (takes ``-f`` option for sender, list of recipients on command line,
+    message on stdin). Normally, setting this to ``sendmail`` or
+    ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
+
+``charsets``
+    Optional. Comma-separated list of character sets considered
+    convenient for recipients. Addresses, headers, and parts not
+    containing patches of outgoing messages will be encoded in the
+    first character set to which conversion from local encoding
+    (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
+    conversion fails, the text in question is sent as is.
+    (default: '')
+
+    Order of outgoing email character sets:
+
+    1. ``us-ascii``: always first, regardless of settings
+    2. ``email.charsets``: in order given by user
+    3. ``ui.fallbackencoding``: if not in email.charsets
+    4. ``$HGENCODING``: if not in email.charsets
+    5. ``utf-8``: always last, regardless of settings
+
+Email example::
+
+  [email]
+  from = Joseph User <joe.user@example.com>
+  method = /usr/sbin/sendmail
+  # charsets for western Europeans
+  # us-ascii, utf-8 omitted, as they are tried first and last
+  charsets = iso-8859-1, iso-8859-15, windows-1252
+
+
+``extensions``
+--------------
+
+Mercurial has an extension mechanism for adding new features. To
+enable an extension, create an entry for it in this section.
+
+If you know that the extension is already in Python's search path,
+you can give the name of the module, followed by ``=``, with nothing
+after the ``=``.
+
+Otherwise, give a name that you choose, followed by ``=``, followed by
+the path to the ``.py`` file (including the file name extension) that
+defines the extension.
+
+To explicitly disable an extension that is enabled in an hgrc of
+broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
+or ``foo = !`` when no path is supplied.
+
+Example for ``~/.hgrc``::
+
+  [extensions]
+  # (the churn extension will get loaded from Mercurial's path)
+  churn =
+  # (this extension will get loaded from the file specified)
+  myfeature = ~/.hgext/myfeature.py
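+
+To disable an extension enabled in an hgrc of broader scope (the
+extension name is illustrative)::
+
+  [extensions]
+  # the largefiles extension enabled system-wide is unwanted here
+  largefiles = !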
+
+
+``format``
+----------
+
+Configuration that controls the repository format. Newer format options are more
+powerful, but incompatible with some older versions of Mercurial. Format options
+are considered at repository initialization only. You need to make a new clone
+for config changes to be taken into account.
+
+For more details about repository format and version compatibility, see
+https://www.mercurial-scm.org/wiki/MissingRequirement
+
+``usegeneraldelta``
+    Enable or disable the "generaldelta" repository format which improves
+    repository compression by allowing "revlog" to store deltas against
+    arbitrary revisions instead of the previously stored one. This provides
+    significant improvement for repositories with branches.
+
+    Repositories with this on-disk format require Mercurial version 1.9.
+
+    Enabled by default.
+
+``dotencode``
+    Enable or disable the "dotencode" repository format which enhances
+    the "fncache" repository format (which has to be enabled to use
+    dotencode) to avoid issues with filenames starting with "._" on
+    Mac OS X and spaces on Windows.
+
+    Repositories with this on-disk format require Mercurial version 1.7.
+
+    Enabled by default.
+
+``usefncache``
+    Enable or disable the "fncache" repository format which enhances
+    the "store" repository format (which has to be enabled to use
+    fncache) to allow longer filenames and avoids using Windows
+    reserved names, e.g. "nul".
+
+    Repositories with this on-disk format require Mercurial version 1.1.
+
+    Enabled by default.
+
+``usestore``
+    Enable or disable the "store" repository format which improves
+    compatibility with systems that fold case or otherwise mangle
+    filenames. Disabling this option will allow you to store longer filenames
+    in some situations at the expense of compatibility.
+
+    Repositories with this on-disk format require Mercurial version 0.9.4.
+
+    Enabled by default.
+
+``sparse-revlog``
+    Enable or disable the ``sparse-revlog`` delta strategy. This format improves
+    delta re-use inside revlog. For very branchy repositories, it results in a
+    smaller store. For repositories with many revisions, it also helps
+    performance (by using shortened delta chains).
+
+    Repositories with this on-disk format require Mercurial version 4.7.
+
+    Enabled by default.
+
+``revlog-compression``
+    Compression algorithm used by revlog. Supported values are `zlib` and
+    `zstd`. The `zlib` engine is the historical default of Mercurial. `zstd` is
+    a newer format that is usually a net win over `zlib`, operating faster at
+    better compression rates. Use `zstd` to reduce CPU usage.
+
+    On some systems, the Mercurial installation may lack `zstd` support.
+
+    Default is `zlib`.
+
+``bookmarks-in-store``
+    Store bookmarks in .hg/store/. This means that bookmarks are shared when
+    using `hg share` regardless of the `-B` option.
+
+    Repositories with this on-disk format require Mercurial version 5.1.
+
+    Disabled by default.
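+
+For example, to create new repositories with zstd compression (remember
+that format options only take effect at repository creation time)::
+
+  [format]
+  revlog-compression = zstd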
+
+
+``graph``
+---------
+
+Web graph view configuration. This section lets you change the display
+properties of graph elements per branch, for instance to make the
+``default`` branch stand out.
+
+Each line has the following format::
+
+    <branch>.<argument> = <value>
+
+where ``<branch>`` is the name of the branch being
+customized. Example::
+
+    [graph]
+    # 2px width
+    default.width = 2
+    # red color
+    default.color = FF0000
+
+Supported arguments:
+
+``width``
+    Set branch edges width in pixels.
+
+``color``
+    Set branch edges color in hexadecimal RGB notation.
+
+``hooks``
+---------
+
+Commands or Python functions that get automatically executed by
+various actions such as starting or finishing a commit. Multiple
+hooks can be run for the same action by appending a suffix to the
+action. Overriding a site-wide hook can be done by changing its
+value or setting it to an empty string.  Hooks can be prioritized
+by adding a prefix of ``priority.`` to the hook name on a new line
+and setting the priority. The default priority is 0.
+
+Example ``.hg/hgrc``::
+
+  [hooks]
+  # update working directory after adding changesets
+  changegroup.update = hg update
+  # do not use the site-wide hook
+  incoming =
+  incoming.email = /my/email/hook
+  incoming.autobuild = /my/build/hook
+  # force autobuild hook to run before other incoming hooks
+  priority.incoming.autobuild = 1
+
+Most hooks are run with environment variables set that give useful
+additional information. For each hook below, the environment variables
+it is passed are listed with names in the form ``$HG_foo``. The
+``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
+They contain the type of hook which triggered the run and the full name
+of the hook in the config, respectively. In the example above, this will
+be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
+
+.. container:: windows
+
+  Some basic Unix syntax can be enabled for portability, including ``$VAR``
+  and ``${VAR}`` style variables.  A ``~`` followed by ``\`` or ``/`` will
+  be expanded to ``%USERPROFILE%`` to simulate a subset of tilde expansion
+  on Unix.  To use a literal ``$`` or ``~``, it must be escaped with a back
+  slash or inside of a strong quote.  Strong quotes will be replaced by
+  double quotes after processing.
+
+  This feature is enabled by adding a prefix of ``tonative.`` to the hook
+  name on a new line, and setting it to ``True``.  For example::
+
+    [hooks]
+    incoming.autobuild = /my/build/hook
+    # enable translation to cmd.exe syntax for autobuild hook
+    tonative.incoming.autobuild = True
+
+``changegroup``
+  Run after a changegroup has been added via push, pull or unbundle.  The ID of
+  the first new changeset is in ``$HG_NODE`` and last is in ``$HG_NODE_LAST``.
+  The URL from which changes came is in ``$HG_URL``.
+
+``commit``
+  Run after a changeset has been created in the local repository. The ID
+  of the newly created changeset is in ``$HG_NODE``. Parent changeset
+  IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
+
+``incoming``
+  Run after a changeset has been pulled, pushed, or unbundled into
+  the local repository. The ID of the newly arrived changeset is in
+  ``$HG_NODE``. The URL that was source of the changes is in ``$HG_URL``.
+
+``outgoing``
+  Run after sending changes from the local repository to another. The ID of
+  first changeset sent is in ``$HG_NODE``. The source of operation is in
+  ``$HG_SOURCE``. Also see :hg:`help config.hooks.preoutgoing`.
+
+``post-<command>``
+  Run after successful invocations of the associated command. The
+  contents of the command line are passed as ``$HG_ARGS`` and the result
+  code in ``$HG_RESULT``. Parsed command line arguments are passed as
+  ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
+  the python data internally passed to <command>. ``$HG_OPTS`` is a
+  dictionary of options (with unspecified options set to their defaults).
+  ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
+
+``fail-<command>``
+  Run after a failed invocation of an associated command. The contents
+  of the command line are passed as ``$HG_ARGS``. Parsed command line
+  arguments are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain
+  string representations of the python data internally passed to
+  <command>. ``$HG_OPTS`` is a dictionary of options (with unspecified
+  options set to their defaults). ``$HG_PATS`` is a list of arguments.
+  Hook failure is ignored.
+
+``pre-<command>``
+  Run before executing the associated command. The contents of the
+  command line are passed as ``$HG_ARGS``. Parsed command line arguments
+  are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
+  representations of the data internally passed to <command>. ``$HG_OPTS``
+  is a dictionary of options (with unspecified options set to their
+  defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
+  failure, the command doesn't execute and Mercurial returns the failure
+  code.
+
+``prechangegroup``
+  Run before a changegroup is added via push, pull or unbundle. Exit
+  status 0 allows the changegroup to proceed. A non-zero status will
+  cause the push, pull or unbundle to fail. The URL from which changes
+  will come is in ``$HG_URL``.
+
+``precommit``
+  Run before starting a local commit. Exit status 0 allows the
+  commit to proceed. A non-zero status will cause the commit to fail.
+  Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
+
+``prelistkeys``
+  Run before listing pushkeys (like bookmarks) in the
+  repository. A non-zero status will cause failure. The key namespace is
+  in ``$HG_NAMESPACE``.
+
+``preoutgoing``
+  Run before collecting changes to send from the local repository to
+  another. A non-zero status will cause failure. This lets you prevent
+  pull over HTTP or SSH. It can also prevent propagating commits (via
+  local pull, push (outbound) or bundle commands), but not completely,
+  since you can just copy files instead. The source of operation is in
+  ``$HG_SOURCE``. If "serve", the operation is happening on behalf of a remote
+  SSH or HTTP repository. If "push", "pull" or "bundle", the operation
+  is happening on behalf of a repository on the same system.
+
+``prepushkey``
+  Run before a pushkey (like a bookmark) is added to the
+  repository. A non-zero status will cause the key to be rejected. The
+  key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
+  the old value (if any) is in ``$HG_OLD``, and the new value is in
+  ``$HG_NEW``.
+
+``pretag``
+  Run before creating a tag. Exit status 0 allows the tag to be
+  created. A non-zero status will cause the tag to fail. The ID of the
+  changeset to tag is in ``$HG_NODE``. The name of the tag is in ``$HG_TAG``. The
+  tag is local if ``$HG_LOCAL=1``, or in the repository if ``$HG_LOCAL=0``.
+
+``pretxnopen``
+  Run before any new repository transaction is opened. The reason for the
+  transaction will be in ``$HG_TXNNAME``, and a unique identifier for the
+  transaction will be in ``$HG_TXNID``. A non-zero status will prevent the
+  transaction from being opened.
+
+``pretxnclose``
+  Run right before the transaction is actually finalized. Any repository change
+  will be visible to the hook program. This lets you validate the transaction
+  content or change it. Exit status 0 allows the commit to proceed. A non-zero
+  status will cause the transaction to be rolled back. The reason for the
+  transaction opening will be in ``$HG_TXNNAME``, and a unique identifier for
+  the transaction will be in ``$HG_TXNID``. The rest of the available data will
+  vary according to the transaction type. New changesets will add ``$HG_NODE``
+  (the ID of the first added changeset), ``$HG_NODE_LAST`` (the ID of the last
+  added changeset), ``$HG_URL`` and ``$HG_SOURCE`` variables.  Bookmark and
+  phase changes will set ``$HG_BOOKMARK_MOVED`` and ``$HG_PHASES_MOVED`` to ``1``
+  respectively, etc.
+
+``pretxnclose-bookmark``
+  Run right before a bookmark change is actually finalized. Any repository
+  change will be visible to the hook program. This lets you validate the
+  transaction content or change it. Exit status 0 allows the commit to
+  proceed. A non-zero status will cause the transaction to be rolled back.
+  The name of the bookmark will be available in ``$HG_BOOKMARK``, the new
+  bookmark location will be available in ``$HG_NODE`` while the previous
+  location will be available in ``$HG_OLDNODE``. In case of a bookmark
+  creation ``$HG_OLDNODE`` will be empty. In case of deletion ``$HG_NODE``
+  will be empty.
+  In addition, the reason for the transaction opening will be in
+  ``$HG_TXNNAME``, and a unique identifier for the transaction will be in
+  ``$HG_TXNID``.
+
+``pretxnclose-phase``
+  Run right before a phase change is actually finalized. Any repository change
+  will be visible to the hook program. This lets you validate the transaction
+  content or change it. Exit status 0 allows the commit to proceed.  A non-zero
+  status will cause the transaction to be rolled back. The hook is called
+  multiple times, once for each revision affected by a phase change.
+  The affected node is available in ``$HG_NODE``, the new phase in
+  ``$HG_PHASE``, and the previous phase in ``$HG_OLDPHASE``. In the case of
+  a new node, ``$HG_OLDPHASE`` will be empty.  In addition, the reason for
+  the transaction opening will be in ``$HG_TXNNAME``, and a unique
+  identifier for the transaction will be in ``$HG_TXNID``. The hook is also
+  run for newly added revisions; in this case the ``$HG_OLDPHASE`` entry
+  will be empty.
+
+``txnclose``
+  Run after any repository transaction has been committed. At this
+  point, the transaction can no longer be rolled back. The hook will run
+  after the lock is released. See :hg:`help config.hooks.pretxnclose` for
+  details about available variables.
+
+``txnclose-bookmark``
+  Run after any bookmark change has been committed. At this point, the
+  transaction can no longer be rolled back. The hook will run after the lock
+  is released. See :hg:`help config.hooks.pretxnclose-bookmark` for details
+  about available variables.
+
+``txnclose-phase``
+  Run after any phase change has been committed. At this point, the
+  transaction can no longer be rolled back. The hook will run after the lock
+  is released. See :hg:`help config.hooks.pretxnclose-phase` for details about
+  available variables.
+
+``txnabort``
+  Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose`
+  for details about available variables.
+
+``pretxnchangegroup``
+  Run after a changegroup has been added via push, pull or unbundle, but before
+  the transaction has been committed. The changegroup is visible to the hook
+  program. This allows validation of incoming changes before accepting them.
+  The ID of the first new changeset is in ``$HG_NODE`` and last is in
+  ``$HG_NODE_LAST``. Exit status 0 allows the transaction to commit. A non-zero
+  status will cause the transaction to be rolled back, and the push, pull or
+  unbundle will fail. The URL that was the source of changes is in ``$HG_URL``.
+
+``pretxncommit``
+  Run after a changeset has been created, but before the transaction is
+  committed. The changeset is visible to the hook program. This allows
+  validation of the commit message and changes. Exit status 0 allows the
+  commit to proceed. A non-zero status will cause the transaction to
+  be rolled back. The ID of the new changeset is in ``$HG_NODE``. The parent
+  changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
+
+``preupdate``
+  Run before updating the working directory. Exit status 0 allows
+  the update to proceed. A non-zero status will prevent the update.
+  The changeset ID of the first new parent is in ``$HG_PARENT1``. If updating
+  to a merge, the ID of the second new parent is in ``$HG_PARENT2``.
+
+``listkeys``
+  Run after listing pushkeys (like bookmarks) in the repository. The
+  key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
+  dictionary containing the keys and values.
+
+``pushkey``
+  Run after a pushkey (like a bookmark) is added to the
+  repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
+  ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
+  value is in ``$HG_NEW``.
+
+``tag``
+  Run after a tag is created. The ID of the tagged changeset is in ``$HG_NODE``.
+  The name of the tag is in ``$HG_TAG``. The tag is local if ``$HG_LOCAL=1``, or in
+  the repository if ``$HG_LOCAL=0``.
+
+``update``
+  Run after updating the working directory. The changeset ID of the first
+  new parent is in ``$HG_PARENT1``. If updating to a merge, the ID of the
+  second new parent is in ``$HG_PARENT2``. If the update succeeded,
+  ``$HG_ERROR=0``. If the update failed (e.g. because conflicts were not
+  resolved), ``$HG_ERROR=1``.
+
+.. note::
+
+   It is generally better to use standard hooks rather than the
+   generic pre- and post- command hooks, as they are guaranteed to be
+   called in the appropriate contexts for influencing transactions.
+   Also, hooks like "commit" will be called in all contexts that
+   generate a commit (e.g. tag) and not just the commit command.
+
+.. note::
+
+   Environment variables with empty values may not be passed to
+   hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
+   will have an empty value under Unix-like platforms for non-merge
+   changesets, while it will not be available at all under Windows.
+
+The syntax for Python hooks is as follows::
+
+  hookname = python:modulename.submodule.callable
+  hookname = python:/path/to/python/module.py:callable
+
+Python hooks are run within the Mercurial process. Each hook is
+called with at least three keyword arguments: a ui object (keyword
+``ui``), a repository object (keyword ``repo``), and a ``hooktype``
+keyword that tells what kind of hook is used. Arguments listed as
+environment variables above are passed as keyword arguments, with no
+``HG_`` prefix, and names in lower case.
+
+If a Python hook returns a "true" value or raises an exception, this
+is treated as a failure.
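+
+For example, a minimal Python hook might look like this (a sketch; the
+module path, function name and message-length check are illustrative)::
+
+  # /path/to/myhooks.py
+  def checkcommit(ui, repo, hooktype, node=None, **kwargs):
+      # reject commits whose description is shorter than 10 characters
+      if len(repo[node].description()) < 10:
+          ui.warn(b'commit message is too short\n')
+          return True  # a true value means failure
+      return False
+
+It could then be enabled with::
+
+  [hooks]
+  pretxncommit.checkmsg = python:/path/to/myhooks.py:checkcommit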
+
+
+``hostfingerprints``
+--------------------
+
+(Deprecated. Use ``[hostsecurity]``'s ``fingerprints`` options instead.)
+
+Fingerprints of the certificates of known HTTPS servers.
+
+An HTTPS connection to a server with a fingerprint configured here will
+only succeed if the server's certificate matches the fingerprint.
+This is very similar to how SSH known hosts work.
+
+The fingerprint is the SHA-1 hash value of the DER encoded certificate.
+Multiple values can be specified (separated by spaces or commas). This can
+be used to define both old and new fingerprints while a host transitions
+to a new certificate.
+
+The CA chain and ``web.cacerts`` are not used for servers with a fingerprint.
+
+For example::
+
+    [hostfingerprints]
+    hg.intevation.de = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
+    hg.intevation.org = fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
+
+``hostsecurity``
+----------------
+
+Used to specify global and per-host security settings for connecting to
+other machines.
+
+The following options control default behavior for all hosts.
+
+``ciphers``
+    Defines the cryptographic ciphers to use for connections.
+
+    Value must be a valid OpenSSL Cipher List Format as documented at
+    https://www.openssl.org/docs/manmaster/apps/ciphers.html#CIPHER-LIST-FORMAT.
+
+    This setting is for advanced users only. Setting to incorrect values
+    can significantly lower connection security or decrease performance.
+    You have been warned.
+
+    This option requires Python 2.7.
+
+``minimumprotocol``
+    Defines the minimum channel encryption protocol to use.
+
+    By default, the highest version of TLS supported by both client and server
+    is used.
+
+    Allowed values are: ``tls1.0``, ``tls1.1``, ``tls1.2``.
+
+    When running on an old Python version, only ``tls1.0`` is allowed since
+    old versions of Python only support up to TLS 1.0.
+
+    When running a Python that supports modern TLS versions, the default is
+    ``tls1.1``. ``tls1.0`` can still be used to allow TLS 1.0. However, this
+    weakens security and should only be used as a feature of last resort if
+    a server does not support TLS 1.1+.
+
+Options in the ``[hostsecurity]`` section can have the form
+``hostname``:``setting``. This allows multiple settings to be defined on a
+per-host basis.
+
+The following per-host settings can be defined.
+
+``ciphers``
+    This behaves like ``ciphers`` as described above except it only applies
+    to the host on which it is defined.
+
+``fingerprints``
+    A list of hashes of the DER encoded peer/remote certificate. Values have
+    the form ``algorithm``:``fingerprint``, e.g.
+    ``sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2``.
+    In addition, colons (``:``) can appear in the fingerprint part.
+
+    The following algorithms/prefixes are supported: ``sha1``, ``sha256``,
+    ``sha512``.
+
+    Use of ``sha256`` or ``sha512`` is preferred.
+
+    If a fingerprint is specified, the CA chain is not validated for this
+    host and Mercurial will require the remote certificate to match one
+    of the fingerprints specified. This means if the server updates its
+    certificate, Mercurial will abort until a new fingerprint is defined.
+    This can provide stronger security than traditional CA-based validation
+    at the expense of convenience.
+
+    This option takes precedence over ``verifycertsfile``.
+
+``minimumprotocol``
+    This behaves like ``minimumprotocol`` as described above except it
+    only applies to the host on which it is defined.
+
+``verifycertsfile``
+    Path to a file containing a list of PEM encoded certificates used to
+    verify the server certificate. Environment variables and ``~user``
+    constructs are expanded in the filename.
+
+    The server certificate or the certificate's certificate authority (CA)
+    must match a certificate from this file or certificate verification
+    will fail and connections to the server will be refused.
+
+    If defined, only certificates provided by this file will be used:
+    ``web.cacerts`` and any system/default certificates will not be
+    used.
+
+    This option has no effect if the per-host ``fingerprints`` option
+    is set.
+
+    The format of the file is as follows::
+
+        -----BEGIN CERTIFICATE-----
+        ... (certificate in base64 PEM encoding) ...
+        -----END CERTIFICATE-----
+        -----BEGIN CERTIFICATE-----
+        ... (certificate in base64 PEM encoding) ...
+        -----END CERTIFICATE-----
+
+For example::
+
+    [hostsecurity]
+    hg.example.com:fingerprints = sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
+    hg2.example.com:fingerprints = sha1:914f1aff87249c09b6859b88b1906d30756491ca, sha1:fc:e2:8d:d9:51:cd:cb:c1:4d:18:6b:b7:44:8d:49:72:57:e6:cd:33
+    hg3.example.com:fingerprints = sha256:9a:b0:dc:e2:75:ad:8a:b7:84:58:e5:1f:07:32:f1:87:e6:bd:24:22:af:b7:ce:8e:9c:b4:10:cf:b9:f4:0e:d2
+    foo.example.com:verifycertsfile = /etc/ssl/trusted-ca-certs.pem
+
+To change the default minimum protocol version to TLS 1.2 but to allow TLS 1.1
+when connecting to ``hg.example.com``::
+
+    [hostsecurity]
+    minimumprotocol = tls1.2
+    hg.example.com:minimumprotocol = tls1.1
+
+``http_proxy``
+--------------
+
+Used to access web-based Mercurial repositories through an HTTP
+proxy.
+
+``host``
+    Host name and (optional) port of the proxy server, for example
+    "myproxy:8000".
+
+``no``
+    Optional. Comma-separated list of host names that should bypass
+    the proxy.
+
+``passwd``
+    Optional. Password to authenticate with at the proxy server.
+
+``user``
+    Optional. User name to authenticate with at the proxy server.
+
+``always``
+    Optional. Always use the proxy, even for localhost and any entries
+    in ``http_proxy.no``. (default: False)
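+
+Example (host names and credentials are illustrative)::
+
+  [http_proxy]
+  host = myproxy:8000
+  no = localhost, server.internal
+  user = proxyuser
+  passwd = secret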
+
+``http``
+--------
+
+Used to configure access to Mercurial repositories via HTTP.
+
+``timeout``
+    If set, blocking operations will time out after that many seconds.
+    (default: None)
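+
+    Example (the value is illustrative)::
+
+      [http]
+      timeout = 60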
+
+``merge``
+---------
+
+This section specifies behavior during merges and updates.
+
+``checkignored``
+   Controls behavior when an ignored file on disk has the same name as a tracked
+   file in the changeset being merged or updated to, and has different
+   contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``,
+   abort on such files. With ``warn``, warn on such files and back them up as
+   ``.orig``. With ``ignore``, don't print a warning and back them up as
+   ``.orig``. (default: ``abort``)
+
+``checkunknown``
+   Controls behavior when an unknown file that isn't ignored has the same name
+   as a tracked file in the changeset being merged or updated to, and has
+   different contents. Similar to ``merge.checkignored``, except for files that
+   are not ignored. (default: ``abort``)
+
+``on-failure``
+   When set to ``continue`` (the default), the merge process attempts to
+   merge all unresolved files using the chosen merge tool, regardless of
+   whether previous file merge attempts during the process succeeded or not.
+   Setting this to ``prompt`` will prompt after any merge failure whether to
+   continue or halt the merge process. Setting this to ``halt`` will
+   automatically halt the merge process on any merge tool failure. The merge
+   process can be restarted by using the ``resolve`` command. When a merge is
+   halted, the repository is left in a normal ``unresolved`` merge state.
+   (default: ``continue``)
+
+``strict-capability-check``
+   Whether the capabilities of internal merge tools are checked strictly
+   while examining the rules that decide which merge tool to use.
+   (default: False)
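+
+Example (values are illustrative)::
+
+  [merge]
+  checkignored = warn
+  checkunknown = warn
+  on-failure = prompt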
+
+``merge-patterns``
+------------------
+
+This section specifies merge tools to associate with particular file
+patterns. Tools matched here will take precedence over the default
+merge tool. Patterns are globs by default, rooted at the repository
+root.
+
+Example::
+
+  [merge-patterns]
+  **.c = kdiff3
+  **.jpg = myimgmerge
+
+``merge-tools``
+---------------
+
+This section configures external merge tools to use for file-level
+merges. This section has likely been preconfigured at install time.
+Use :hg:`config merge-tools` to check the existing configuration.
+Also see :hg:`help merge-tools` for more details.
+
+Example ``~/.hgrc``::
+
+  [merge-tools]
+  # Override stock tool location
+  kdiff3.executable = ~/bin/kdiff3
+  # Specify command line
+  kdiff3.args = $base $local $other -o $output
+  # Give higher priority
+  kdiff3.priority = 1
+
+  # Changing the priority of preconfigured tool
+  meld.priority = 0
+
+  # Disable a preconfigured tool
+  vimdiff.disabled = yes
+
+  # Define new tool
+  myHtmlTool.args = -m $local $other $base $output
+  myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
+  myHtmlTool.priority = 1
+
+Supported arguments:
+
+``priority``
+  The priority in which to evaluate this tool.
+  (default: 0)
+
+``executable``
+  Either just the name of the executable or its pathname.
+
+  .. container:: windows
+
+    On Windows, the path can use environment variables with ${ProgramFiles}
+    syntax.
+
+  (default: the tool name)
+
+``args``
+  The arguments to pass to the tool executable. You can refer to the
+  files being merged as well as the output file through these
+  variables: ``$base``, ``$local``, ``$other``, ``$output``.
+
+  The meaning of ``$local`` and ``$other`` can vary depending on which action is
+  being performed. During an update or merge, ``$local`` represents the original
+  state of the file, while ``$other`` represents the commit you are updating to or
+  the commit you are merging with. During a rebase, ``$local`` represents the
+  destination of the rebase, and ``$other`` represents the commit being rebased.
+
+  Some operations define custom labels to assist with identifying the revisions,
+  accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
+  labels are not available, these will be ``local``, ``other``, and ``base``,
+  respectively.
+  (default: ``$local $base $other``)
+
+``premerge``
+  Attempt to run internal non-interactive 3-way merge tool before
+  launching external tool.  Options are ``true``, ``false``, ``keep`` or
+  ``keep-merge3``. The ``keep`` option will leave markers in the file if the
+  premerge fails. The ``keep-merge3`` will do the same but include information
+  about the base of the merge in the marker (see ``internal:merge3`` in
+  :hg:`help merge-tools`).
+  (default: True)
+
+``binary``
+  This tool can merge binary files. (default: False, unless tool
+  was selected by file pattern match)
+
+``symlink``
+  This tool can merge symlinks. (default: False)
+
+``check``
+  A list of merge success-checking options:
+
+  ``changed``
+    Ask whether merge was successful when the merged file shows no changes.
+  ``conflicts``
+    Check whether there are conflicts even though the tool reported success.
+  ``prompt``
+    Always prompt for merge success, regardless of success reported by tool.
+
+``fixeol``
+  Attempt to fix up EOL changes caused by the merge tool.
+  (default: False)
+
+``gui``
+  This tool requires a graphical interface to run. (default: False)
+
+``mergemarkers``
+  Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
+  ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
+  ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
+  markers generated during premerge will be ``detailed`` if either this option or
+  the corresponding option in the ``[ui]`` section is ``detailed``.
+  (default: ``basic``)
+
+``mergemarkertemplate``
+  This setting can be used to override ``mergemarkertemplate`` from the ``[ui]``
+  section on a per-tool basis; this applies to the ``$label``-prefixed variables
+  and to the conflict markers that are generated if ``premerge`` is ``keep`` or
+  ``keep-merge3``. See the corresponding variable in ``[ui]`` for more
+  information.
+
+.. container:: windows
+
+  ``regkey``
+    Windows registry key which describes install location of this
+    tool. Mercurial will search for this key first under
+    ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
+    (default: None)
+
+  ``regkeyalt``
+    An alternate Windows registry key to try if the first key is not
+    found.  The alternate key uses the same ``regname`` and ``regappend``
+    semantics of the primary key.  The most common use for this key
+    is to search for 32bit applications on 64bit operating systems.
+    (default: None)
+
+  ``regname``
+    Name of value to read from specified registry key.
+    (default: the unnamed (default) value)
+
+  ``regappend``
+    String to append to the value read from the registry, typically
+    the executable name of the tool.
+    (default: None)
+
+``pager``
+---------
+
+Settings used to control when to paginate and with which external tool. See
+:hg:`help pager` for details.
+
+``pager``
+    Define the external tool used as pager.
+
+    If no pager is set, Mercurial uses the environment variable ``$PAGER``.
+    If neither ``pager.pager`` nor ``$PAGER`` is set, a default pager will be
+    used, typically `less` on Unix and `more` on Windows. Example::
+
+      [pager]
+      pager = less -FRX
+
+``ignore``
+    List of commands to disable the pager for. Example::
+
+      [pager]
+      ignore = version, help, update
+
+``patch``
+---------
+
+Settings used when applying patches, for instance through the 'import'
+command or the Mercurial Queues extension.
+
+``eol``
+    When set to ``strict``, the line endings of the patch content and of
+    patched files are preserved. When set to ``lf`` or ``crlf``, line
+    endings of both are ignored when patching, and the resulting line
+    endings are normalized to either LF (Unix) or CRLF (Windows). When set
+    to ``auto``, line endings are again ignored while patching, but line
+    endings in patched files are normalized to their original setting on a
+    per-file basis. If the target file does not exist or has no line
+    endings, the patch line endings are preserved.
+    (default: strict)
+
+``fuzz``
+    The number of lines of 'fuzz' to allow when applying patches. This
+    controls how much context the patcher is allowed to ignore when
+    trying to apply a patch.
+    (default: 2)
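+
+Example (values are illustrative)::
+
+  [patch]
+  eol = auto
+  fuzz = 0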
+
+``paths``
+---------
+
+Assigns symbolic names and behavior to repositories.
+
+Options are symbolic names defining the URL or directory that is the
+location of the repository. Example::
+
+    [paths]
+    my_server = https://example.com/my_repo
+    local_path = /home/me/repo
+
+These symbolic names can be used from the command line. To pull
+from ``my_server``: :hg:`pull my_server`. To push to ``local_path``:
+:hg:`push local_path`.
+
+Options containing colons (``:``) denote sub-options that can influence
+behavior for that specific path. Example::
+
+    [paths]
+    my_server = https://example.com/my_path
+    my_server:pushurl = ssh://example.com/my_path
+
+The following sub-options can be defined:
+
+``pushurl``
+   The URL to use for push operations. If not defined, the location
+   defined by the path's main entry is used.
+
+``pushrev``
+   A revset defining which revisions to push by default.
+
+   When :hg:`push` is executed without a ``-r`` argument, the revset
+   defined by this sub-option is evaluated to determine what to push.
+
+   For example, a value of ``.`` will push the working directory's
+   revision by default.
+
+   Revsets specifying bookmarks will not result in the bookmark being
+   pushed.
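+
+   For example, to push only the working directory's revision by default
+   (the URL is illustrative)::
+
+     [paths]
+     default = https://example.com/my_repo
+     default:pushrev = .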
+
+The following special named paths exist:
+
+``default``
+   The URL or directory to use when no source or remote is specified.
+
+   :hg:`clone` will automatically define this path to the location the
+   repository was cloned from.
+
+``default-push``
+   (deprecated) The URL or directory for the default :hg:`push` location.
+   ``default:pushurl`` should be used instead.
+
+``phases``
+----------
+
+Specifies default handling of phases. See :hg:`help phases` for more
+information about working with phases.
+
+``publish``
+    Controls draft phase behavior when working as a server. When true,
+    pushed changesets are set to public in both client and server and
+    pulled or cloned changesets are set to public in the client.
+    (default: True)
+
+``new-commit``
+    Phase of newly-created commits.
+    (default: draft)
+
+``checksubrepos``
+    Check the phase of the current revision of each subrepository. Allowed
+    values are "ignore", "follow" and "abort". For settings other than
+    "ignore", the phase of the current revision of each subrepository is
+    checked before committing the parent repository. If any of those phases is
+    greater than the phase of the parent repository (e.g. if a subrepo is in a
+    "secret" phase while the parent repo is in "draft" phase), the commit is
+    either aborted (if checksubrepos is set to "abort") or the higher phase is
+    used for the parent repository commit (if set to "follow").
+    (default: follow)
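+
+Example (values are illustrative)::
+
+  [phases]
+  # act as a non-publishing server and create new commits as secret
+  publish = False
+  new-commit = secret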
+
+
+``profiling``
+-------------
+
+Specifies profiling type, format, and file output. Two profilers are
+supported: an instrumenting profiler (named ``ls``), and a sampling
+profiler (named ``stat``).
+
+In this section description, 'profiling data' stands for the raw data
+collected during profiling, while 'profiling report' stands for a
+statistical text report generated from the profiling data.
+
+``enabled``
+    Enable the profiler.
+    (default: false)
+
+    This is equivalent to passing ``--profile`` on the command line.
+
+``type``
+    The type of profiler to use.
+    (default: stat)
+
+    ``ls``
+      Use Python's built-in instrumenting profiler. This profiler
+      works on all platforms, but each line number it reports is the
+      first line of a function. This restriction makes it difficult to
+      identify the expensive parts of a non-trivial function.
+    ``stat``
+      Use a statistical profiler, statprof. This profiler is most
+      useful for profiling commands that run for longer than about 0.1
+      seconds.
+
+``format``
+    Profiling format.  Specific to the ``ls`` instrumenting profiler.
+    (default: text)
+
+    ``text``
+      Generate a profiling report. When saving to a file, it should be
+      noted that only the report is saved, and the profiling data is
+      not kept.
+    ``kcachegrind``
+      Format profiling data for kcachegrind use: when saving to a
+      file, the generated file can directly be loaded into
+      kcachegrind.
+
+``statformat``
+    Profiling format for the ``stat`` profiler.
+    (default: hotpath)
+
+    ``hotpath``
+      Show a tree-based display containing the hot path of execution (where
+      most time was spent).
+    ``bymethod``
+      Show a table of methods ordered by how frequently they are active.
+    ``byline``
+      Show a table of lines in files ordered by how frequently they are active.
+    ``json``
+      Render profiling data as JSON.
+
+``frequency``
+    Sampling frequency.  Specific to the ``stat`` sampling profiler.
+    (default: 1000)
+
+``output``
+    File path where profiling data or report should be saved. If the
+    file exists, it is replaced. (default: None, data is printed on
+    stderr)
+
+``sort``
+    Sort field.  Specific to the ``ls`` instrumenting profiler.
+    One of ``callcount``, ``reccallcount``, ``totaltime`` and
+    ``inlinetime``.
+    (default: inlinetime)
+
+``time-track``
+    Controls whether the stat profiler tracks ``cpu`` or ``real`` time.
+    (default: ``cpu`` on Windows, otherwise ``real``)
+
+``limit``
+    Number of lines to show. Specific to the ``ls`` instrumenting profiler.
+    (default: 30)
+
+``nested``
+    Show at most this number of lines of drill-down info after each main entry.
+    This can help explain the difference between Total and Inline.
+    Specific to the ``ls`` instrumenting profiler.
+    (default: 0)
+
+``showmin``
+    Minimum fraction of samples an entry must have for it to be displayed.
+    Can be specified as a float between ``0.0`` and ``1.0`` or can have a
+    ``%`` afterwards to allow values up to ``100``, e.g. ``5%``.
+
+    Only used by the ``stat`` profiler.
+
+    For the ``hotpath`` format, default is ``0.05``.
+    For the ``chrome`` format, default is ``0.005``.
+
+    The option is unused on other formats.
+
+``showmax``
+    Maximum fraction of samples an entry can have before it is ignored in
+    display. Values format is the same as ``showmin``.
+
+    Only used by the ``stat`` profiler.
+
+    For the ``chrome`` format, default is ``0.999``.
+
+    The option is unused on other formats.
+
+``showtime``
+    Show time taken as absolute durations, in addition to percentages.
+    Only used by the ``hotpath`` format.
+    (default: true)
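+
+Example (values are illustrative)::
+
+  [profiling]
+  enabled = true
+  type = stat
+  statformat = hotpath
+  output = /tmp/hg-profile.txt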
+
+``progress``
+------------
+
+Mercurial commands can draw progress bars that are as informative as
+possible. Some progress bars only offer indeterminate information, while others
+have a definite end point.
+
+``debug``
+    Whether to print debug info when updating the progress bar. (default: False)
+
+``delay``
+    Number of seconds (float) before showing the progress bar. (default: 3)
+
+``changedelay``
+    Minimum delay before showing a new topic. When set to less than 3 * refresh,
+    that value will be used instead. (default: 1)
+
+``estimateinterval``
+    Maximum sampling interval in seconds for speed and estimated time
+    calculation. (default: 60)
+
+``refresh``
+    Time in seconds between refreshes of the progress bar. (default: 0.1)
+
+``format``
+    Format of the progress bar.
+
+    Valid entries for the format field are ``topic``, ``bar``, ``number``,
+    ``unit``, ``estimate``, ``speed``, and ``item``. ``item`` defaults to the
+    last 20 characters of the item, but this can be changed by adding either
+    ``-<num>`` which would take the last num characters, or ``+<num>`` for the
+    first num characters.
+
+    (default: topic bar number estimate)
+
+``width``
+    If set, the maximum width of the progress information (that is, min(width,
+    term width) will be used).
+
+``clear-complete``
+    Clear the progress bar after it's done. (default: True)
+
+``disable``
+    If true, don't show a progress bar.
+
+``assume-tty``
+    If true, ALWAYS show a progress bar, unless disable is given.
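+
+Example (values are illustrative)::
+
+  [progress]
+  delay = 1.5
+  format = topic bar number estimate
+  clear-complete = False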
+
+``rebase``
+----------
+
+``evolution.allowdivergence``
+    When true, allow creating divergence when rebasing obsolete changesets.
+    (default: False)
+
+``revsetalias``
+---------------
+
+Alias definitions for revsets. See :hg:`help revsets` for details.
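+
+Example (the alias names are illustrative)::
+
+  [revsetalias]
+  h = heads()
+  d($1) = sort($1, date)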
+
+``rewrite``
+-----------
+
+``backup-bundle``
+    Whether to save stripped changesets to a bundle file. (default: True)
+
+``update-timestamp``
+    If true, updates the date and time of the changeset to current. It is only
+    applicable for `hg amend`, `hg commit --amend` and `hg uncommit` in the
+    current version.
+
+``storage``
+-----------
+
+Control the strategy Mercurial uses internally to store history. Options in this
+category impact performance and repository size.
+
+``revlog.optimize-delta-parent-choice``
+    When storing a merge revision, both parents will be equally considered as
+    a possible delta base. This results in better delta selection and improved
+    revlog compression. This option is enabled by default.
+
+    Turning this option off can result in a large increase of repository size
+    for repositories with many merges.
+
+``revlog.reuse-external-delta-parent``
+    Control the order in which delta parents are considered when adding new
+    revisions from an external source.
+    (typically: apply bundle from `hg pull` or `hg push`).
+
+    New revisions are usually provided as a delta against other revisions. By
+    default, Mercurial will try to reuse this delta first, therefore using the
+    same "delta parent" as the source. Directly using delta's from the source
+    reduces CPU usage and usually speeds up operation. However, in some case,
+    the source might have sub-optimal delta bases and forcing their reevaluation
+    is useful. For example, pushes from an old client could have sub-optimal
+    delta's parent that the server want to optimize. (lack of general delta, bad
+    parents, choice, lack of sparse-revlog, etc).
+
+    This option is enabled by default. Turning it off will ensure bad delta
+    parent choices from older clients do not propagate to this repository, at
+    the cost of a small increase in CPU consumption.
+
+    Note: this option only controls the order in which delta parents are
+    considered.  Even when disabled, the existing delta from the source will be
+    reused if the same delta parent is selected.
+
+``revlog.reuse-external-delta``
+    Control the reuse of delta from external source.
+    (typically: apply bundle from `hg pull` or `hg push`).
+
+    New revisions are usually provided as a delta against another revision. By
+    default, Mercurial will not recompute the same delta again, trusting
+    externally provided deltas. There have been rare cases of small adjustments
+    to the diffing algorithm in the past, so in some rare cases, recomputing
+    deltas provided by ancient clients can provide better results. Disabling
+    this option means going through a full delta recomputation for all incoming
+    revisions. This means a large increase in CPU usage and will slow
+    operations down.
+
+    This option is enabled by default. When disabled, it also disables the
+    related ``storage.revlog.reuse-external-delta-parent`` option.
+
+``revlog.zlib.level``
+    Zlib compression level used when storing data into the repository. Accepted
+    values range from 1 (lowest compression) to 9 (highest compression). The
+    zlib default is 6.
+
+``revlog.zstd.level``
+    zstd compression level used when storing data into the repository. Accepted
+    values range from 1 (lowest compression) to 22 (highest compression).
+    (default: 3)
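+
+    For example, a sketch trading extra CPU for a smaller repository by
+    raising both levels (values are illustrative)::
+
+        [storage]
+        revlog.zlib.level = 9
+        revlog.zstd.level = 10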
+
+``server``
+----------
+
+Controls generic server settings.
+
+``bookmarks-pushkey-compat``
+    Trigger the pushkey hook when bookmark updates are pushed. This config
+    exists for compatibility purposes. (default: True)
+
+    If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
+    movement we recommend you migrate them to ``txnclose-bookmark`` and
+    ``pretxnclose-bookmark``.
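+
+    A sketch of such a migration, assuming a hypothetical
+    ``/path/to/bookmark-hook`` script::
+
+        [hooks]
+        # before: fired for every pushkey transaction, including bookmarks
+        #pre-pushkey.bookmarks = /path/to/bookmark-hook
+        # after: fired only when bookmarks move
+        pretxnclose-bookmark.bookmarks = /path/to/bookmark-hook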
+
+``compressionengines``
+    List of compression engines and their relative priority to advertise
+    to clients.
+
+    The order of compression engines determines their priority, the first
+    having the highest priority. If a compression engine is not listed
+    here, it won't be advertised to clients.
+
+    If not set (the default), built-in defaults are used. Run
+    :hg:`debuginstall` to list available compression engines and their
+    default wire protocol priority.
+
+    Older Mercurial clients only support zlib compression and this setting
+    has no effect for legacy clients.
+
+``uncompressed``
+    Whether to allow clients to clone a repository using the
+    uncompressed streaming protocol. This transfers about 40% more
+    data than a regular clone, but uses less memory and CPU on both
+    server and client. Over a LAN (100 Mbps or better) or a very fast
+    WAN, an uncompressed streaming clone is a lot faster (~10x) than a
+    regular clone. Over most WAN connections (anything slower than
+    about 6 Mbps), uncompressed streaming is slower, because of the
+    extra data transfer overhead. This mode will also temporarily hold
+    the write lock while determining what data to transfer.
+    (default: True)
+
+``uncompressedallowsecret``
+    Whether to allow stream clones when the repository contains secret
+    changesets. (default: False)
+
+``preferuncompressed``
+    When set, clients will try to use the uncompressed streaming
+    protocol. (default: False)
+
+``disablefullbundle``
+    When set, servers will refuse attempts to do pull-based clones.
+    If this option is set, ``preferuncompressed`` and/or clone bundles
+    are highly recommended. Partial clones will still be allowed.
+    (default: False)
+
+``streamunbundle``
+    When set, servers will apply data sent from the client directly,
+    otherwise it will be written to a temporary file first. This option
+    effectively prevents concurrent pushes.
+
+``pullbundle``
+    When set, the server will check pullbundle.manifest for bundles
+    covering the requested heads and common nodes. The first matching
+    entry will be streamed to the client.
+
+    For HTTP transport, the stream will still use zlib compression
+    for older clients.
+
+``concurrent-push-mode``
+    Level of allowed race condition between two pushing clients.
+
+    - 'strict': the push is aborted if another client touched the repository
+      while the push was preparing. (default)
+    - 'check-related': the push is only aborted if it affects heads that were
+      also affected while the push was preparing.
+
+    This requires a compatible client (version 4.3 and later). Older clients
+    will fall back to 'strict'.
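+
+    For example, to relax the race check on a server whose clients are all
+    recent enough::
+
+        [server]
+        concurrent-push-mode = check-related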
+
+``validate``
+    Whether to validate the completeness of pushed changesets by
+    checking that all new file revisions specified in manifests are
+    present. (default: False)
+
+``maxhttpheaderlen``
+    Instruct HTTP clients not to send request headers longer than this
+    many bytes. (default: 1024)
+
+``bundle1``
+    Whether to allow clients to push and pull using the legacy bundle1
+    exchange format. (default: True)
+
+``bundle1gd``
+    Like ``bundle1`` but only used if the repository is using the
+    *generaldelta* storage format. (default: True)
+
+``bundle1.push``
+    Whether to allow clients to push using the legacy bundle1 exchange
+    format. (default: True)
+
+``bundle1gd.push``
+    Like ``bundle1.push`` but only used if the repository is using the
+    *generaldelta* storage format. (default: True)
+
+``bundle1.pull``
+    Whether to allow clients to pull using the legacy bundle1 exchange
+    format. (default: True)
+
+``bundle1gd.pull``
+    Like ``bundle1.pull`` but only used if the repository is using the
+    *generaldelta* storage format. (default: True)
+
+    Large repositories using the *generaldelta* storage format should
+    consider setting this option because converting *generaldelta*
+    repositories to the exchange format required by the bundle1 data
+    format can consume a lot of CPU.
+
+``bundle2.stream``
+    Whether to allow clients to pull using the bundle2 streaming protocol.
+    (default: True)
+
+``zliblevel``
+    Integer between ``-1`` and ``9`` that controls the zlib compression level
+    for wire protocol commands that send zlib compressed output (notably the
+    commands that send repository history data).
+
+    The default (``-1``) uses the default zlib compression level, which is
+    likely equivalent to ``6``. ``0`` means no compression. ``9`` means
+    maximum compression.
+
+    Setting this option allows server operators to make trade-offs between
+    bandwidth and CPU used. Lowering the compression lowers CPU utilization
+    but sends more bytes to clients.
+
+    This option only impacts the HTTP server.
+
+``zstdlevel``
+    Integer between ``1`` and ``22`` that controls the zstd compression level
+    for wire protocol commands. ``1`` is the minimal amount of compression and
+    ``22`` is the highest amount of compression.
+
+    The default (``3``) should be significantly faster than zlib while likely
+    delivering better compression ratios.
+
+    This option only impacts the HTTP server.
+
+    See also ``server.zliblevel``.
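+
+    For example, a server trading some bandwidth for lower CPU usage might
+    use (values are illustrative)::
+
+        [server]
+        zliblevel = 4
+        zstdlevel = 3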
+
+``view``
+    Repository filter used when exchanging revisions with the peer.
+
+    The default view (``served``) excludes secret and hidden changesets.
+    Another useful value is ``immutable`` (no draft, secret or hidden
+    changesets). (EXPERIMENTAL)
+
+``smtp``
+--------
+
+Configuration for extensions that need to send email messages.
+
+``host``
+    Host name of mail server, e.g. "mail.example.com".
+
+``port``
+    Optional. Port to connect to on mail server. (default: 465 if
+    ``tls`` is smtps; 25 otherwise)
+
+``tls``
+    Optional. Method to enable TLS when connecting to mail server: starttls,
+    smtps or none. (default: none)
+
+``username``
+    Optional. User name for authenticating with the SMTP server.
+    (default: None)
+
+``password``
+    Optional. Password for authenticating with the SMTP server. If not
+    specified, interactive sessions will prompt the user for a
+    password; non-interactive sessions will fail. (default: None)
+
+``local_hostname``
+    Optional. The hostname that the sender can use to identify
+    itself to the MTA.
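+
+A minimal sketch of an ``[smtp]`` setup (host and user name are
+hypothetical)::
+
+    [smtp]
+    host = mail.example.com
+    tls = starttls
+    username = hguser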
+
+
+``subpaths``
+------------
+
+Subrepository source URLs can go stale if a remote server changes name
+or becomes temporarily unavailable. This section lets you define
+rewrite rules of the form::
+
+    <pattern> = <replacement>
+
+where ``pattern`` is a regular expression matching a subrepository
+source URL and ``replacement`` is the replacement string used to
+rewrite it. Groups can be matched in ``pattern`` and referenced in
+``replacement``. For instance::
+
+    http://server/(.*)-hg/ = http://hg.server/\1/
+
+rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
+
+Relative subrepository paths are first made absolute, and the
+rewrite rules are then applied on the full (absolute) path. If ``pattern``
+doesn't match the full path, an attempt is made to apply it on the
+relative path alone. The rules are applied in definition order.
+
+``subrepos``
+------------
+
+This section contains options that control the behavior of the
+subrepositories feature. See also :hg:`help subrepos`.
+
+Security note: auditing in Mercurial is known to be insufficient to
+prevent clone-time code execution with carefully constructed Git
+subrepos. It is unknown if a similar defect is present in Subversion
+subrepos. Both Git and Subversion subrepos are disabled by default
+out of security concerns. These subrepo types can be enabled using
+the respective options below.
+
+``allowed``
+    Whether subrepositories are allowed in the working directory.
+
+    When false, commands involving subrepositories (like :hg:`update`)
+    will fail for all subrepository types.
+    (default: true)
+
+``hg:allowed``
+    Whether Mercurial subrepositories are allowed in the working
+    directory. This option only has an effect if ``subrepos.allowed``
+    is true.
+    (default: true)
+
+``git:allowed``
+    Whether Git subrepositories are allowed in the working directory.
+    This option only has an effect if ``subrepos.allowed`` is true.
+
+    See the security note above before enabling Git subrepos.
+    (default: false)
+
+``svn:allowed``
+    Whether Subversion subrepositories are allowed in the working
+    directory. This option only has an effect if ``subrepos.allowed``
+    is true.
+
+    See the security note above before enabling Subversion subrepos.
+    (default: false)
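+
+For example, after reviewing the security note above, Git subrepos could be
+enabled with::
+
+    [subrepos]
+    allowed = true
+    git:allowed = true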
+
+``templatealias``
+-----------------
+
+Alias definitions for templates. See :hg:`help templates` for details.
+
+``templates``
+-------------
+
+Use the ``[templates]`` section to define template strings.
+See :hg:`help templates` for details.
+
+``trusted``
+-----------
+
+Mercurial will not use the settings in the
+``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
+user or to a trusted group, as various hgrc features allow arbitrary
+commands to be run. This issue is often encountered when configuring
+hooks or extensions for shared repositories or servers. However,
+the web interface will use some safe settings from the ``[web]``
+section.
+
+This section specifies what users and groups are trusted. The
+current user is always trusted. To trust everybody, list a user or a
+group with name ``*``. These settings must be placed in an
+*already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
+user or service running Mercurial.
+
+``users``
+  Comma-separated list of trusted users.
+
+``groups``
+  Comma-separated list of trusted groups.
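+
+For example, placed in an already-trusted file such as ``$HOME/.hgrc``
+(user and group names are hypothetical)::
+
+    [trusted]
+    users = alice, bob
+    groups = hgdevel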
+
+
+``ui``
+------
+
+User interface controls.
+
+``archivemeta``
+    Whether to include the .hg_archival.txt file containing meta data
+    (hashes for the repository base and for tip) in archives created
+    by the :hg:`archive` command or downloaded via hgweb.
+    (default: True)
+
+``askusername``
+    Whether to prompt for a username when committing. If True, and
+    neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
+    be prompted to enter a username. If no username is entered, the
+    default ``USER@HOST`` is used instead.
+    (default: False)
+
+``clonebundles``
+    Whether the "clone bundles" feature is enabled.
+
+    When enabled, :hg:`clone` may download and apply a server-advertised
+    bundle file from a URL instead of using the normal exchange mechanism.
+
+    This can likely result in faster and more reliable clones.
+
+    (default: True)
+
+``clonebundlefallback``
+    Whether failure to apply an advertised "clone bundle" from a server
+    should result in fallback to a regular clone.
+
+    This is disabled by default because servers advertising "clone
+    bundles" often do so to reduce server load. If advertised bundles
+    start mass failing and clients automatically fall back to a regular
+    clone, this would add significant and unexpected load to the server
+    since the server is expecting clone operations to be offloaded to
+    pre-generated bundles. Failing fast (the default behavior) ensures
+    clients don't overwhelm the server when "clone bundle" application
+    fails.
+
+    (default: False)
+
+``clonebundleprefers``
+    Defines preferences for which "clone bundles" to use.
+
+    Servers advertising "clone bundles" may advertise multiple available
+    bundles. Each bundle may have different attributes, such as the bundle
+    type and compression format. This option is used to prefer a particular
+    bundle over another.
+
+    The following keys are defined by Mercurial:
+
+    BUNDLESPEC
+       A bundle type specifier. These are strings passed to :hg:`bundle -t`.
+       e.g. ``gzip-v2`` or ``bzip2-v1``.
+
+    COMPRESSION
+       The compression format of the bundle. e.g. ``gzip`` and ``bzip2``.
+
+    Server operators may define custom keys.
+
+    Example values: ``COMPRESSION=bzip2``,
+    ``BUNDLESPEC=gzip-v2, COMPRESSION=gzip``.
+
+    By default, the first bundle advertised by the server is used.
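+
+    For example, reusing the example values above::
+
+        [ui]
+        clonebundleprefers = BUNDLESPEC=gzip-v2, COMPRESSION=gzip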
+
+``color``
+    When to colorize output. Possible values are Boolean ("yes" or "no"),
+    "debug", or "always". (default: "yes"). "yes" will use color whenever it
+    seems possible. See :hg:`help color` for details.
+
+``commitsubrepos``
+    Whether to commit modified subrepositories when committing the
+    parent repository. If False and one subrepository has uncommitted
+    changes, abort the commit.
+    (default: False)
+
+``debug``
+    Print debugging information. (default: False)
+
+``editor``
+    The editor to use during a commit. (default: ``$EDITOR`` or ``vi``)
+
+``fallbackencoding``
+    Encoding to try if it's not possible to decode the changelog using
+    UTF-8. (default: ISO-8859-1)
+
+``graphnodetemplate``
+    The template used to print changeset nodes in an ASCII revision graph.
+    (default: ``{graphnode}``)
+
+``ignore``
+    A file to read per-user ignore patterns from. This file should be
+    in the same format as a repository-wide .hgignore file. Filenames
+    are relative to the repository root. This option supports hook syntax,
+    so if you want to specify multiple ignore files, you can do so by
+    setting something like ``ignore.other = ~/.hgignore2``. For details
+    of the ignore file format, see the ``hgignore(5)`` man page.
+
+``interactive``
+    Allow prompting the user. (default: True)
+
+``interface``
+    Select the default interface for interactive features (default: text).
+    Possible values are 'text' and 'curses'.
+
+``interface.chunkselector``
+    Select the interface for change recording (e.g. :hg:`commit -i`).
+    Possible values are 'text' and 'curses'.
+    This config overrides the interface specified by ui.interface.
+
+``large-file-limit``
+    Largest file size that gives no memory use warning.
+    Possible values are integers or 0 to disable the check.
+    (default: 10000000)
+
+``logtemplate``
+    Template string for commands that print changesets.
+
+``merge``
+    The conflict resolution program to use during a manual merge.
+    For more information on merge tools see :hg:`help merge-tools`.
+    For configuring merge tools see the ``[merge-tools]`` section.
+
+``mergemarkers``
+    Sets the merge conflict marker label styling. The ``detailed``
+    style uses the ``mergemarkertemplate`` setting to style the labels.
+    The ``basic`` style just uses 'local' and 'other' as the marker label.
+    One of ``basic`` or ``detailed``.
+    (default: ``basic``)
+
+``mergemarkertemplate``
+    The template used to print the commit description next to each conflict
+    marker during merge conflicts. See :hg:`help templates` for the template
+    format.
+
+    Defaults to showing the hash, tags, branches, bookmarks, author, and
+    the first line of the commit description.
+
+    If you use non-ASCII characters in names for tags, branches, bookmarks,
+    authors, and/or commit descriptions, you must pay attention to encodings of
+    managed files. At template expansion, non-ASCII characters use the encoding
+    specified by the ``--encoding`` global option, ``HGENCODING`` or other
+    environment variables that govern your locale. If the encoding of the merge
+    markers is different from the encoding of the merged files,
+    serious problems may occur.
+
+    Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
+
+``message-output``
+    Where to write status and error messages. (default: ``stdio``)
+
+    ``stderr``
+      Everything to stderr.
+    ``stdio``
+      Status to stdout, and error to stderr.
+
+``origbackuppath``
+    The path to a directory used to store generated .orig files. If the path is
+    not a directory, one will be created.  If set, files stored in this
+    directory have the same name as the original file and do not have a .orig
+    suffix.
+
+``paginate``
+    Control the pagination of command output (default: True). See
+    :hg:`help pager` for details.
+
+``patch``
+    An optional external tool that ``hg import`` and some extensions
+    will use for applying patches. By default Mercurial uses an
+    internal patch utility. The external tool must work as the common
+    Unix ``patch`` program. In particular, it must accept a ``-p``
+    argument to strip patch headers, a ``-d`` argument to specify the
+    current directory, a file name to patch, and a patch file to take
+    from stdin.
+
+    It is possible to specify a patch tool together with extra
+    arguments. For example, setting this option to ``patch --merge``
+    will use the ``patch`` program with its 2-way merge option.
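+
+    For example::
+
+        [ui]
+        patch = patch --merge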
+
+``portablefilenames``
+    Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
+    (default: ``warn``)
+
+    ``warn``
+      Print a warning message on POSIX platforms, if a file with a non-portable
+      filename is added (e.g. a file with a name that can't be created on
+      Windows because it contains reserved parts like ``AUX``, reserved
+      characters like ``:``, or would cause a case collision with an existing
+      file).
+
+    ``ignore``
+      Don't print a warning.
+
+    ``abort``
+      The command is aborted.
+
+    ``true``
+      Alias for ``warn``.
+
+    ``false``
+      Alias for ``ignore``.
+
+    .. container:: windows
+
+      On Windows, this configuration option is ignored and the command aborted.
+
+``pre-merge-tool-output-template``
+    A template that is printed before executing an external merge tool. This can
+    be used to print out additional context that might be useful to have during
+    the conflict resolution, such as the description of the various commits
+    involved or bookmarks/tags.
+
+    Additional information is available in the ``local``, ``base``, and
+    ``other`` dicts. For example: ``{local.label}``, ``{base.name}``, or
+    ``{other.islink}``.
+
+``quiet``
+    Reduce the amount of output printed.
+    (default: False)
+
+``relative-paths``
+    Prefer relative paths in the UI.
+
+``remotecmd``
+    Remote command to use for clone/push/pull operations.
+    (default: ``hg``)
+
+``report_untrusted``
+    Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
+    trusted user or group.
+    (default: True)
+
+``slash``
+    (Deprecated. Use ``slashpath`` template filter instead.)
+
+    Display paths using a slash (``/``) as the path separator. This
+    only makes a difference on systems where the default path
+    separator is not the slash character (e.g. Windows uses the
+    backslash character (``\``)).
+    (default: False)
+
+``statuscopies``
+    Display copies in the status command.
+
+``ssh``
+    Command to use for SSH connections. (default: ``ssh``)
+
+``ssherrorhint``
+    A hint shown to the user in the case of SSH error (e.g.
+    ``Please see http://company/internalwiki/ssh.html``)
+
+``strict``
+    Require exact command names, instead of allowing unambiguous
+    abbreviations. (default: False)
+
+``style``
+    Name of style to use for command output.
+
+``supportcontact``
+    A URL where users should report a Mercurial traceback. Use this if you are a
+    large organisation with its own Mercurial deployment process and crash
+    reports should be addressed to your internal support.
+
+``textwidth``
+    Maximum width of help text. A longer line generated by ``hg help`` or
+    ``hg subcommand --help`` will be broken after white space to get this
+    width or the terminal width, whichever comes first.
+    A non-positive value will disable this and the terminal width will be
+    used. (default: 78)
+
+``timeout``
+    The timeout used when a lock is held (in seconds), a negative value
+    means no timeout. (default: 600)
+
+``timeout.warn``
+    Time (in seconds) before a warning is printed about held lock. A negative
+    value means no warning. (default: 0)
+
+``traceback``
+    Mercurial always prints a traceback when an unknown exception
+    occurs. Setting this to True will make Mercurial print a traceback
+    on all exceptions, even those recognized by Mercurial (such as
+    IOError or MemoryError). (default: False)
+
+``tweakdefaults``
+    By default Mercurial's behavior changes very little from release
+    to release, but over time the recommended config settings
+    shift. Enable this config to opt in to get automatic tweaks to
+    Mercurial's behavior over time. This config setting will have no
+    effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
+    not include ``tweakdefaults``. (default: False)
+
+    It currently means::
+
+      .. tweakdefaultsmarker
+
+``username``
+    The committer of a changeset created when running "commit".
+    Typically a person's name and email address, e.g. ``Fred Widget
+    <fred@example.com>``. Environment variables in the
+    username are expanded.
+
+    (default: ``$EMAIL`` or ``username@hostname``. If the username in
+    hgrc is empty, e.g. if the system admin set ``username =`` in the
+    system hgrc, it has to be specified manually or in a different
+    hgrc file)
+
+``verbose``
+    Increase the amount of output printed. (default: False)
+
+
+``web``
+-------
+
+Web interface configuration. The settings in this section apply to
+both the builtin webserver (started by :hg:`serve`) and the script you
+run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
+and WSGI).
+
+The Mercurial webserver does no authentication (it does not prompt for
+usernames and passwords to validate *who* users are), but it does do
+authorization (it grants or denies access for *authenticated users*
+based on settings in this section). You must either configure your
+webserver to do authentication for you, or disable the authorization
+checks.
+
+For a quick setup in a trusted environment, e.g., a private LAN, where
+you want it to accept pushes from anybody, you can use the following
+command line::
+
+    $ hg --config web.allow-push=* --config web.push_ssl=False serve
+
+Note that this will allow anybody to push anything to the server and
+that this should not be used for public servers.
+
+The full set of options is:
+
+``accesslog``
+    Where to output the access log. (default: stdout)
+
+``address``
+    Interface address to bind to. (default: all)
+
+``allow-archive``
+    List of archive format (bz2, gz, zip) allowed for downloading.
+    (default: empty)
+
+``allowbz2``
+    (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
+    revisions.
+    (default: False)
+
+``allowgz``
+    (DEPRECATED) Whether to allow .tar.gz downloading of repository
+    revisions.
+    (default: False)
+
+``allow-pull``
+    Whether to allow pulling from the repository. (default: True)
+
+``allow-push``
+    Whether to allow pushing to the repository. If empty or not set,
+    pushing is not allowed. If the special value ``*``, any remote
+    user can push, including unauthenticated users. Otherwise, the
+    remote user must have been authenticated, and the authenticated
+    user name must be present in this list. The contents of the
+    allow-push list are examined after the deny_push list.
+
+``allow_read``
+    If the user has not already been denied repository access due to
+    the contents of deny_read, this list determines whether to grant
+    repository access to the user. If this list is not empty, and the
+    user is unauthenticated or not present in the list, then access is
+    denied for the user. If the list is empty or not set, then access
+    is permitted to all users by default. Setting allow_read to the
+    special value ``*`` is equivalent to it not being set (i.e. access
+    is permitted to all users). The contents of the allow_read list are
+    examined after the deny_read list.
+
+``allowzip``
+    (DEPRECATED) Whether to allow .zip downloading of repository
+    revisions. This feature creates temporary files.
+    (default: False)
+
+``archivesubrepos``
+    Whether to recurse into subrepositories when archiving.
+    (default: False)
+
+``baseurl``
+    Base URL to use when publishing URLs in other locations, so
+    third-party tools like email notification hooks can construct
+    URLs. Example: ``http://hgserver/repos/``.
+
+``cacerts``
+    Path to file containing a list of PEM encoded certificate
+    authority certificates. Environment variables and ``~user``
+    constructs are expanded in the filename. If specified on the
+    client, then it will verify the identity of remote HTTPS servers
+    with these certificates.
+
+    To disable SSL verification temporarily, specify ``--insecure`` from
+    command line.
+
+    You can use OpenSSL's CA certificate file if your platform has
+    one. On most Linux systems this will be
+    ``/etc/ssl/certs/ca-certificates.crt``. Otherwise you will have to
+    generate this file manually. The form must be as follows::
+
+        -----BEGIN CERTIFICATE-----
+        ... (certificate in base64 PEM encoding) ...
+        -----END CERTIFICATE-----
+        -----BEGIN CERTIFICATE-----
+        ... (certificate in base64 PEM encoding) ...
+        -----END CERTIFICATE-----
+
+``cache``
+    Whether to support caching in hgweb. (default: True)
+
+``certificate``
+    Certificate to use when running :hg:`serve`.
+
+``collapse``
+    With ``descend`` enabled, repositories in subdirectories are shown at
+    a single level alongside repositories in the current path. With
+    ``collapse`` also enabled, repositories residing at a deeper level than
+    the current path are grouped behind navigable directory entries that
+    lead to the locations of these repositories. In effect, this setting
+    collapses each collection of repositories found within a subdirectory
+    into a single entry for that subdirectory. (default: False)
+
+``comparisoncontext``
+    Number of lines of context to show in side-by-side file comparison. If
+    negative or the value ``full``, whole files are shown. (default: 5)
+
+    This setting can be overridden by a ``context`` request parameter to the
+    ``comparison`` command, taking the same values.
+
+``contact``
+    Name or email address of the person in charge of the repository.
+    (default: ui.username or ``$EMAIL`` or "unknown" if unset or empty)
+
+``csp``
+    Send a ``Content-Security-Policy`` HTTP header with this value.
+
+    The value may contain a special string ``%nonce%``, which will be replaced
+    by a randomly-generated one-time use value. If the value contains
+    ``%nonce%``, ``web.cache`` will be disabled, as caching undermines the
+    one-time property of the nonce. This nonce will also be inserted into
+    ``<script>`` elements containing inline JavaScript.
+
+    Note: lots of HTML content sent by the server is derived from repository
+    data. Please consider the potential for malicious repository data to
+    "inject" itself into generated HTML content as part of your security
+    threat model.
+
+``deny_push``
+    Whether to deny pushing to the repository. If empty or not set,
+    push is not denied. If the special value ``*``, all remote users are
+    denied push. Otherwise, unauthenticated users are all denied, and
+    any authenticated user name present in this list is also denied. The
+    contents of the deny_push list are examined before the allow-push list.
+
+``deny_read``
+    Whether to deny reading/viewing of the repository. If this list is
+    not empty, unauthenticated users are all denied, and any
+    authenticated user name present in this list is also denied access to
+    the repository. If set to the special value ``*``, all remote users
+    are denied access (rarely needed ;). If deny_read is empty or not set,
+    the determination of repository access depends on the presence and
+    content of the allow_read list (see description). If both
+    deny_read and allow_read are empty or not set, then access is
+    permitted to all users by default. If the repository is being
+    served via hgwebdir, denied users will not be able to see it in
+    the list of repositories. The contents of the deny_read list have
+    priority over (are examined before) the contents of the allow_read
+    list.
+
+``descend``
+    hgwebdir indexes will not descend into subdirectories. Only repositories
+    directly in the current path will be shown (other repositories are still
+    available from the index corresponding to their containing path).
+
+``description``
+    Textual description of the repository's purpose or contents.
+    (default: "unknown")
+
+``encoding``
+    Character encoding name. (default: the current locale charset)
+    Example: "UTF-8".
+
+``errorlog``
+    Where to output the error log. (default: stderr)
+
+``guessmime``
+    Control MIME types for raw download of file content.
+    Set to True to let hgweb guess the content type from the file
+    extension. This will serve HTML files as ``text/html`` and might
+    allow cross-site scripting attacks when serving untrusted
+    repositories. (default: False)
+
+``hidden``
+    Whether to hide the repository in the hgwebdir index.
+    (default: False)
+
+``ipv6``
+    Whether to use IPv6. (default: False)
+
+``labels``
+    List of string *labels* associated with the repository.
+
+    Labels are exposed as a template keyword and can be used to customize
+    output. e.g. the ``index`` template can group or filter repositories
+    by labels and the ``summary`` template can display additional content
+    if a specific label is present.
+
+``logoimg``
+    File name of the logo image that some templates display on each page.
+    The file name is relative to ``staticurl``. That is, the full path to
+    the logo image is "staticurl/logoimg".
+    If unset, ``hglogo.png`` will be used.
+
+``logourl``
+    Base URL to use for logos. If unset, ``https://mercurial-scm.org/``
+    will be used.
+
+``maxchanges``
+    Maximum number of changes to list on the changelog. (default: 10)
+
+``maxfiles``
+    Maximum number of files to list per changeset. (default: 10)
+
+``maxshortchanges``
+    Maximum number of changes to list on the shortlog, graph or filelog
+    pages. (default: 60)
+
+``name``
+    Repository name to use in the web interface.
+    (default: current working directory)
+
+``port``
+    Port to listen on. (default: 8000)
+
+``prefix``
+    Prefix path to serve from. (default: '' (server root))
+
+``push_ssl``
+    Whether to require that inbound pushes be transported over SSL to
+    prevent password sniffing. (default: True)
+
+``refreshinterval``
+    How frequently directory listings re-scan the filesystem for new
+    repositories, in seconds. This is relevant when wildcards are used
+    to define paths. Depending on how much filesystem traversal is
+    required, refreshing may negatively impact performance.
+
+    Values less than or equal to 0 always refresh.
+    (default: 20)
+
+``server-header``
+    Value for HTTP ``Server`` response header.
+
+``static``
+    Directory where static files are served from.
+
+``staticurl``
+    Base URL to use for static files. If unset, static files (e.g. the
+    hgicon.png favicon) will be served by the CGI script itself. Use
+    this setting to serve them directly with the HTTP server.
+    Example: ``http://hgserver/static/``.
+
+``stripes``
+    How many lines a "zebra stripe" should span in multi-line output.
+    Set to 0 to disable. (default: 1)
+
+``style``
+    Which template map style to use. The available options are the names of
+    subdirectories in the HTML templates path. (default: ``paper``)
+    Example: ``monoblue``.
+
+``templates``
+    Where to find the HTML templates. The default path to the HTML templates
+    can be obtained from ``hg debuginstall``.
+
+``websub``
+----------
+
+Web substitution filter definition. You can use this section to
+define a set of regular expression substitution patterns which
+let you automatically modify the hgweb server output.
+
+The default hgweb templates only apply these substitution patterns
+on the revision description fields. You can apply them anywhere
+you want when you create your own templates by adding calls to the
+"websub" filter (usually after calling the "escape" filter).
+
+This can be used, for example, to convert issue references to links
+to your issue tracker, or to convert "markdown-like" syntax into
+HTML (see the examples below).
+
+Each entry in this section names a substitution filter.
+The value of each entry defines the substitution expression itself.
+The websub expressions follow the old interhg extension syntax,
+which in turn imitates the Unix sed replacement syntax::
+
+    patternname = s/SEARCH_REGEX/REPLACE_EXPRESSION/[i]
+
+You can use any separator other than "/". The final "i" is optional
+and indicates that the search must be case insensitive.
+
+Examples::
+
+    [websub]
+    issues = s|issue(\d+)|<a href="http://bts.example.org/issue\1">issue\1</a>|i
+    italic = s/\b_(\S+)_\b/<i>\1<\/i>/
+    bold = s/\*\b(\S+)\b\*/<b>\1<\/b>/
+
+``worker``
+----------
+
+Parallel master/worker configuration. We currently perform working
+directory updates in parallel on Unix-like systems, which greatly
+helps performance.
+
+``enabled``
+    Whether to enable use of the worker code.
+    (default: true)
+
+``numcpus``
+    Number of CPUs to use for parallel operations. A zero or
+    negative value is treated as ``use the default``.
+    (default: 4 or the number of CPUs on the system, whichever is larger)
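+
+    For example, a sketch capping parallel operations at eight CPUs::
+
+        [worker]
+        numcpus = 8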
+
+``backgroundclose``
+    Whether to enable closing file handles on background threads during certain
+    operations. Some platforms aren't very efficient at closing file
+    handles that have been written or appended to. By performing file closing
+    on background threads, file write rate can increase substantially.
+    (default: true on Windows, false elsewhere)
+
+``backgroundcloseminfilecount``
+    Minimum number of files required to trigger background file closing.
+    Operations not writing this many files won't start background close
+    threads.
+    (default: 2048)
+
+``backgroundclosemaxqueue``
+    The maximum number of opened file handles waiting to be closed in the
+    background. This option only has an effect if ``backgroundclose`` is
+    enabled.
+    (default: 384)
+
+``backgroundclosethreadcount``
+    Number of threads to process background file closes. Only relevant if
+    ``backgroundclose`` is enabled.
+    (default: 4)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/dates.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,39 @@
+Some commands allow the user to specify a date, e.g.:
+
+- backout, commit, import, tag: Specify the commit date.
+- log, revert, update: Select revision(s) by date.
+
+Many date formats are valid. Here are some examples:
+
+- ``Wed Dec 6 13:18:29 2006`` (local timezone assumed)
+- ``Dec 6 13:18 -0600`` (year assumed, time offset provided)
+- ``Dec 6 13:18 UTC`` (UTC and GMT are aliases for +0000)
+- ``Dec 6`` (midnight)
+- ``13:18`` (today assumed)
+- ``3:39`` (3:39AM assumed)
+- ``3:39pm`` (15:39)
+- ``2006-12-06 13:18:29`` (ISO 8601 format)
+- ``2006-12-6 13:18``
+- ``2006-12-6``
+- ``12-6``
+- ``12/6``
+- ``12/6/6`` (Dec 6 2006)
+- ``today`` (midnight)
+- ``yesterday`` (midnight)
+- ``now`` - right now
+
+Lastly, there is Mercurial's internal format:
+
+- ``1165411109 0`` (Wed Dec 6 13:18:29 2006 UTC)
+
+This is the internal representation format for dates. The first number
+is the number of seconds since the epoch (1970-01-01 00:00 UTC). The
+second is the offset of the local timezone, in seconds west of UTC
+(negative if the timezone is east of UTC).
+
+The log command also accepts date ranges:
+
+- ``<DATE`` - at or before a given date/time
+- ``>DATE`` - on or after a given date/time
+- ``DATE to DATE`` - a date range, inclusive
+- ``-DAYS`` - within a given number of days of today
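+
+For example, some sketches using these ranges with :hg:`log` (dates are
+illustrative)::
+
+    hg log -d "2006-12-06 to 2006-12-31"
+    hg log -d ">2006-12-06"
+    hg log -d "-30"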
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/deprecated.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,30 @@
+Mercurial evolves over time; some features, options, and commands may be
+replaced by better and more secure alternatives. This topic will help you
+migrate your existing usage and/or configuration to newer features.
+
+Commands
+========
+
+The following commands are still available, but their use is not recommended:
+
+``locate``
+
+This command has been replaced by `hg files`.
+
+``parents``
+
+This command can be replaced by `hg summary` or `hg log` with appropriate
+revsets. See `hg help revsets` for more information.
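+
+For instance, a revset that approximates the old ``parents`` output by
+showing the working directory's parents might be::
+
+    hg log -r 'p1()+p2()'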
+
+``tip``
+
+The recommended alternative is `hg heads`.
+
+Options
+=======
+
+``web.allowpull``
+    Renamed to `allow-pull`.
+
+``web.allow_push``
+    Renamed to `allow-push`.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/diffs.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,29 @@
+Mercurial's default format for showing changes between two versions of
+a file is compatible with the unified format of GNU diff, which can be
+used by GNU patch and many other standard tools.
+
+While this standard format is often enough, it does not encode the
+following information:
+
+- executable status and other permission bits
+- copy or rename information
+- changes in binary files
+- creation or deletion of empty files
+
+Mercurial also supports the extended diff format from the git VCS
+which addresses these limitations. The git diff format is not produced
+by default because a few widespread tools still do not understand this
+format.
+
+This means that when generating diffs from a Mercurial repository
+(e.g. with :hg:`export`), you should be careful about things like file
+copies and renames or other things mentioned above, because when
+applying a standard diff to a different repository, this extra
+information is lost. Mercurial's internal operations (like push and
+pull) are not affected by this, because they use an internal binary
+format for communicating changes.
+
+To make Mercurial produce the git extended diff format, use the --git
+option available for many commands, or set 'git = True' in the [diff]
+section of your configuration file. You do not need to set this option
+when importing diffs in this format or using them in the mq extension.
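+
+For example, to enable the git format permanently in a configuration file::
+
+    [diff]
+    git = True
+
+or per invocation with the command-line flag::
+
+    hg export --git -r tip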
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/environment.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,124 @@
+HG
+    Path to the 'hg' executable, automatically passed when running
+    hooks, extensions or external tools. If unset or empty, this is
+    the hg executable's name if it's frozen, or an executable named
+    'hg' (with %PATHEXT% [defaulting to COM/EXE/BAT/CMD] extensions on
+    Windows) is searched.
+
+HGEDITOR
+    This is the name of the editor to run when committing. See EDITOR.
+
+    (deprecated, see :hg:`help config.ui.editor`)
+
+HGENCODING
+    This overrides the default locale setting detected by Mercurial.
+    This setting is used to convert data including usernames,
+    changeset descriptions, tag names, and branches. This setting can
+    be overridden with the --encoding command-line option.
+
+HGENCODINGMODE
+    This sets Mercurial's behavior for handling unknown characters
+    while transcoding user input. The default is "strict", which
+    causes Mercurial to abort if it can't map a character. Other
+    settings include "replace", which replaces unknown characters, and
+    "ignore", which drops them. This setting can be overridden with
+    the --encodingmode command-line option.
+
+HGENCODINGAMBIGUOUS
+    This sets Mercurial's behavior for handling characters with
+    "ambiguous" widths like accented Latin characters with East Asian
+    fonts. By default, Mercurial assumes ambiguous characters are
+    narrow; set this variable to "wide" if such characters cause
+    formatting problems.
+
+HGMERGE
+    An executable to use for resolving merge conflicts. The program
+    will be executed with three arguments: local file, remote file,
+    ancestor file.
+
+    (deprecated, see :hg:`help config.ui.merge`)
+
+HGRCPATH
+    A list of files or directories to search for configuration
+    files. Item separator is ":" on Unix, ";" on Windows. If HGRCPATH
+    is not set, platform default search path is used. If empty, only
+    the .hg/hgrc from the current repository is read.
+
+    For each element in HGRCPATH:
+
+    - if it's a directory, all files ending with .rc are added
+    - otherwise, the file itself will be added
+
+HGRCSKIPREPO
+    When set, the .hg/hgrc files from repositories are not read.
+
+HGPLAIN
+    When set, this disables any configuration settings that might
+    change Mercurial's default output. This includes encoding,
+    defaults, verbose mode, debug mode, quiet mode, tracebacks, and
+    localization. This can be useful when scripting against Mercurial
+    in the face of existing user configuration.
+
+    In addition to the features disabled by ``HGPLAIN=``, the following
+    values can be specified to adjust behavior:
+
+    ``+strictflags``
+        Restrict parsing of command line flags.
+
+    Equivalent options set via command line flags or environment
+    variables are not overridden.
+
+    See :hg:`help scripting` for details.
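+
+    For example, a script might pin down Mercurial's output like this (the
+    template is illustrative)::
+
+        HGPLAIN=1 hg log -r . --template '{node}\n'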
+
+HGPLAINEXCEPT
+    This is a comma-separated list of features to preserve when
+    HGPLAIN is enabled. Currently the following values are supported:
+
+    ``alias``
+        Don't remove aliases.
+    ``color``
+        Don't disable colored output.
+    ``i18n``
+        Preserve internationalization.
+    ``revsetalias``
+        Don't remove revset aliases.
+    ``templatealias``
+        Don't remove template aliases.
+    ``progress``
+        Don't hide progress output.
+
+    Setting HGPLAINEXCEPT to anything (even an empty string) will
+    enable plain mode.
+
+HGUSER
+    This is the string used as the author of a commit. If not set,
+    available values will be considered in this order:
+
+    - HGUSER (deprecated)
+    - configuration files from the HGRCPATH
+    - EMAIL
+    - interactive prompt
+    - LOGNAME (with ``@hostname`` appended)
+
+    (deprecated, see :hg:`help config.ui.username`)
+
+EMAIL
+    May be used as the author of a commit; see HGUSER.
+
+LOGNAME
+    May be used as the author of a commit; see HGUSER.
+
+VISUAL
+    This is the name of the editor to use when committing. See EDITOR.
+
+EDITOR
+    Sometimes Mercurial needs to open a text file in an editor for a
+    user to modify, for example when writing commit messages. The
+    editor it uses is determined by looking at the environment
+    variables HGEDITOR, VISUAL and EDITOR, in that order. The first
+    non-empty one is chosen. If all of them are empty, the editor
+    defaults to 'vi'.
+
+PYTHONPATH
+    This is used by Python to find imported modules and may need to be
+    set appropriately if this Mercurial is not installed system-wide.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/extensions.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,35 @@
+Mercurial has the ability to add new features through the use of
+extensions. Extensions may add new commands, add options to
+existing commands, change the default behavior of commands, or
+implement hooks.
+
+To enable the "foo" extension, either shipped with Mercurial or in the
+Python search path, create an entry for it in your configuration file,
+like this::
+
+  [extensions]
+  foo =
+
+You may also specify the full path to an extension::
+
+  [extensions]
+  myfeature = ~/.hgext/myfeature.py
+
+See :hg:`help config` for more information on configuration files.
+
+Extensions are not loaded by default for a variety of reasons:
+they can increase startup overhead; they may be meant for advanced
+usage only; they may provide potentially dangerous abilities (such
+as letting you destroy or modify history); they might not be ready
+for prime time; or they may alter some usual behaviors of stock
+Mercurial. It is thus up to the user to activate extensions as
+needed.
+
+To explicitly disable an extension enabled in a configuration file of
+broader scope, prepend its path with !::
+
+  [extensions]
+  # disabling extension bar residing in /path/to/extension/bar.py
+  bar = !/path/to/extension/bar.py
+  # ditto, but no path was supplied for extension baz
+  baz = !
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/filesets.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,79 @@
+Mercurial supports a functional language for selecting a set of
+files.
+
+Like other file patterns, this pattern type is indicated by a prefix,
+'set:'. The language supports a number of predicates which are joined
+by infix operators. Parentheses can be used for grouping.
+
+Identifiers such as filenames or patterns must be quoted with single
+or double quotes if they contain characters outside of
+``[.*{}[]?/\_a-zA-Z0-9\x80-\xff]`` or if they match one of the
+predefined predicates. This generally applies to file patterns other
+than globs and arguments for predicates. Pattern prefixes such as
+``path:`` may be specified without quoting.
+
+Special characters can be used in quoted identifiers by escaping them,
+e.g., ``\n`` is interpreted as a newline. To prevent them from being
+interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``.
+
+See also :hg:`help patterns`.
+
+Operators
+=========
+
+There is a single prefix operator:
+
+``not x``
+  Files not in x. Short form is ``! x``.
+
+These are the supported infix operators:
+
+``x and y``
+  The intersection of files in x and y. Short form is ``x & y``.
+
+``x or y``
+  The union of files in x and y. There are two alternative short
+  forms: ``x | y`` and ``x + y``.
+
+``x - y``
+  Files in x but not in y.
+
+Predicates
+==========
+
+The following predicates are supported:
+
+.. predicatesmarker
+
+Examples
+========
+
+Some sample queries:
+
+- Show status of files that appear to be binary in the working directory::
+
+    hg status -A "set:binary()"
+
+- Forget files that are in .hgignore but are already tracked::
+
+    hg forget "set:hgignore() and not ignored()"
+
+- Find text files that contain a string::
+
+    hg files "set:grep(magic) and not binary()"
+
+- Find C files in a non-standard encoding::
+
+    hg files "set:**.c and not encoding('UTF-8')"
+
+- Revert copies of large binary files::
+
+    hg revert "set:copied() and binary() and size('>1M')"
+
+- Revert files that were added to the working directory::
+
+    hg revert "set:revs('wdir()', added())"
+
+- Remove files listed in foo.lst that contain the letter a or b::
+
+    hg remove "set: listfile:foo.lst and (**a* or **b*)"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/flags.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,104 @@
+Most Mercurial commands accept various flags.
+
+Flag names
+==========
+
+Flags for each command are listed in :hg:`help` for that command.
+Additionally, some flags, such as --repository, are global and can be used with
+any command - those are seen in :hg:`help -v`, and can be specified before or
+after the command.
+
+Every flag has at least a long name, such as --repository. Some flags may also
+have a short one-letter name, such as the equivalent -R. Using the short or long
+name is equivalent and has the same effect.
+
+Flags that have a short name can also be bundled together - for instance, to
+specify both --edit (short -e) and --interactive (short -i), one could use::
+
+    hg commit -ei
+
+If any of the bundled flags takes a value (i.e. is not a boolean), it must be
+last, followed by the value::
+
+    hg commit -im 'Message'
+
+Flag types
+==========
+
+Mercurial command-line flags can be strings, numbers, booleans, or lists of
+strings.
+
+Specifying flag values
+======================
+
+The following syntaxes are allowed, assuming a flag 'flagname' with short name
+'f'::
+
+    --flagname=foo
+    --flagname foo
+    -f foo
+    -ffoo
+
+This syntax applies to all non-boolean flags (strings, numbers or lists).
+
+Specifying boolean flags
+========================
+
+Boolean flags do not take a value parameter. To specify a boolean, use the flag
+name to set it to true, or the same name prefixed with 'no-' to set it to
+false::
+
+    hg commit --interactive
+    hg commit --no-interactive
+
+Specifying list flags
+=====================
+
+List flags take multiple values. To specify them, pass the flag multiple times::
+
+    hg files --include mercurial --include tests
+
+Setting flag defaults
+=====================
+
+In order to set a default value for a flag in an hgrc file, it is recommended to
+use aliases::
+
+    [alias]
+    commit = commit --interactive
+
+For more information on hgrc files, see :hg:`help config`.
+
+Overriding flags on the command line
+====================================
+
+If the same non-list flag is specified multiple times on the command line, the
+latest specification is used::
+
+    hg commit -m "Ignored value" -m "Used value"
+
+This includes the use of aliases - e.g., if one has::
+
+    [alias]
+    committemp = commit -m "Ignored value"
+
+then the following command will override that -m::
+
+    hg committemp -m "Used value"
+
+Overriding flag defaults
+========================
+
+Every flag has a default value, and you may also set your own defaults in hgrc
+as described above.
+Except for list flags, defaults can be overridden on the command line simply by
+specifying the flag in that location.
+
+Hidden flags
+============
+
+Some flags are not shown in a command's help by default - specifically, those
+that are deemed to be experimental, deprecated or advanced. To show all flags,
+add the --verbose flag for the help command::
+
+    hg help --verbose commit
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/glossary.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,413 @@
+Ancestor
+    Any changeset that can be reached by an unbroken chain of parent
+    changesets from a given changeset. More precisely, the ancestors
+    of a changeset can be defined by two properties: a parent of a
+    changeset is an ancestor, and a parent of an ancestor is an
+    ancestor. See also: 'Descendant'.
+
+Bookmark
+    Bookmarks are pointers to certain commits that move when
+    committing. They are similar to tags in that it is possible to use
+    bookmark names in all places where Mercurial expects a changeset
+    ID, e.g., with :hg:`update`. Unlike tags, bookmarks move along
+    when you make a commit.
+
+    Bookmarks can be renamed, copied and deleted. Bookmarks are local,
+    unless they are explicitly pushed or pulled between repositories.
+    Pushing and pulling bookmarks allow you to collaborate with others
+    on a branch without creating a named branch.
+
+Branch
+    (Noun) A child changeset that has been created from a parent that
+    is not a head. These are known as topological branches, see
+    'Branch, topological'. If a topological branch is named, it becomes
+    a named branch. If a topological branch is not named, it becomes
+    an anonymous branch. See 'Branch, anonymous' and 'Branch, named'.
+
+    Branches may be created when changes are pulled from or pushed to
+    a remote repository, since new heads may be created by these
+    operations. Note that the term branch can also be used informally
+    to describe a development process in which certain development is
+    done independently of other development. This is sometimes done
+    explicitly with a named branch, but it can also be done locally,
+    using bookmarks or clones and anonymous branches.
+
+    Example: "The experimental branch."
+
+    (Verb) The action of creating a child changeset which results in
+    its parent having more than one child.
+
+    Example: "I'm going to branch at X."
+
+Branch, anonymous
+    Every time a new child changeset is created from a parent that is not
+    a head and the name of the branch is not changed, a new anonymous
+    branch is created.
+
+Branch, closed
+    A named branch whose branch heads have all been closed.
+
+Branch, default
+    The branch assigned to a changeset when no name has previously been
+    assigned.
+
+Branch head
+    See 'Head, branch'.
+
+Branch, inactive
+    If a named branch has no topological heads, it is considered to be
+    inactive. As an example, a feature branch becomes inactive when it
+    is merged into the default branch. The :hg:`branches` command
+    shows inactive branches by default, though they can be hidden with
+    :hg:`branches --active`.
+
+    NOTE: this concept is deprecated because it is too implicit.
+    Branches should now be explicitly closed using :hg:`commit
+    --close-branch` when they are no longer needed.
+
+Branch, named
+    A collection of changesets which have the same branch name. By
+    default, children of a changeset in a named branch belong to the
+    same named branch. A child can be explicitly assigned to a
+    different branch. See :hg:`help branch`, :hg:`help branches` and
+    :hg:`commit --close-branch` for more information on managing
+    branches.
+
+    Named branches can be thought of as a kind of namespace, dividing
+    the collection of changesets that comprise the repository into a
+    collection of disjoint subsets. A named branch is not necessarily
+    a topological branch. If a new named branch is created from the
+    head of another named branch, or the default branch, but no
+    further changesets are added to that previous branch, then that
+    previous branch will be a branch in name only.
+
+Branch tip
+    See 'Tip, branch'.
+
+Branch, topological
+    Every time a new child changeset is created from a parent that is
+    not a head, a new topological branch is created. If a topological
+    branch is named, it becomes a named branch. If a topological
+    branch is not named, it becomes an anonymous branch of the
+    current, possibly default, branch.
+
+Changelog
+    A record of the changesets in the order in which they were added
+    to the repository. This includes details such as changeset id,
+    author, commit message, date, and list of changed files.
+
+Changeset
+    A snapshot of the state of the repository used to record a change.
+
+Changeset, child
+    The converse of parent changeset: if P is a parent of C, then C is
+    a child of P. There is no limit to the number of children that a
+    changeset may have.
+
+Changeset id
+    A SHA-1 hash that uniquely identifies a changeset. It may be
+    represented as either a "long" 40 hexadecimal digit string, or a
+    "short" 12 hexadecimal digit string.
+
+Changeset, merge
+    A changeset with two parents. This occurs when a merge is
+    committed.
+
+Changeset, parent
+    A revision upon which a child changeset is based. Specifically, a
+    parent changeset of a changeset C is a changeset whose node
+    immediately precedes C in the DAG. Changesets have at most two
+    parents.
+
+Checkout
+    (Noun) The working directory being updated to a specific
+    revision. This use should probably be avoided where possible, as
+    changeset is much more appropriate than checkout in this context.
+
+    Example: "I'm using checkout X."
+
+    (Verb) Updating the working directory to a specific changeset. See
+    :hg:`help update`.
+
+    Example: "I'm going to check out changeset X."
+
+Child changeset
+    See 'Changeset, child'.
+
+Close changeset
+    See 'Head, closed branch'.
+
+Closed branch
+    See 'Branch, closed'.
+
+Clone
+    (Noun) An entire or partial copy of a repository. The partial
+    clone must be in the form of a revision and its ancestors.
+
+    Example: "Is your clone up to date?"
+
+    (Verb) The process of creating a clone, using :hg:`clone`.
+
+    Example: "I'm going to clone the repository."
+
+Closed branch head
+    See 'Head, closed branch'.
+
+Commit
+    (Noun) A synonym for changeset.
+
+    Example: "Is the bug fixed in your recent commit?"
+
+    (Verb) The act of recording changes to a repository. When files
+    are committed in a working directory, Mercurial finds the
+    differences between the committed files and their parent
+    changeset, creating a new changeset in the repository.
+
+    Example: "You should commit those changes now."
+
+Cset
+    A common abbreviation of the term changeset.
+
+DAG
+    The repository of changesets of a distributed version control
+    system (DVCS) can be described as a directed acyclic graph (DAG),
+    consisting of nodes and edges, where nodes correspond to
+    changesets and edges imply a parent -> child relation. This graph
+    can be visualized by graphical tools such as :hg:`log --graph`. In
+    Mercurial, the DAG is limited by the requirement for children to
+    have at most two parents.
+
+Deprecated
+    Feature removed from documentation, but not scheduled for removal.
+
+Default branch
+    See 'Branch, default'.
+
+Descendant
+    Any changeset that can be reached by a chain of child changesets
+    from a given changeset. More precisely, the descendants of a
+    changeset can be defined by two properties: the child of a
+    changeset is a descendant, and the child of a descendant is a
+    descendant. See also: 'Ancestor'.
+
+Diff
+    (Noun) The difference between the contents and attributes of files
+    in two changesets or a changeset and the current working
+    directory. The difference is usually represented in a standard
+    form called a "diff" or "patch". The "git diff" format is used
+    when the changes include copies, renames, or changes to file
+    attributes, none of which can be represented/handled by classic
+    "diff" and "patch".
+
+    Example: "Did you see my correction in the diff?"
+
+    (Verb) Diffing two changesets is the action of creating a diff or
+    patch.
+
+    Example: "If you diff with changeset X, you will see what I mean."
+
+Directory, working
+    The working directory represents the state of the files tracked by
+    Mercurial that will be recorded in the next commit. The working
+    directory initially corresponds to the snapshot at an existing
+    changeset, known as the parent of the working directory. See
+    'Parent, working directory'. The state may be modified by changes
+    to the files introduced manually or by a merge. The repository
+    metadata exists in the .hg directory inside the working directory.
+
+Draft
+    Changesets in the draft phase have not been shared with publishing
+    repositories and may thus be safely changed by history-modifying
+    extensions. See :hg:`help phases`.
+
+Experimental
+    Feature that may change or be removed at a later date.
+
+Graph
+    See DAG and :hg:`log --graph`.
+
+Head
+    The term 'head' may be used to refer to both a branch head or a
+    repository head, depending on the context. See 'Head, branch' and
+    'Head, repository' for specific definitions.
+
+    Heads are where development generally takes place and are the
+    usual targets for update and merge operations.
+
+Head, branch
+    A changeset with no descendants on the same named branch.
+
+Head, closed branch
+    A changeset that marks a head as no longer interesting. The closed
+    head is no longer listed by :hg:`heads`. A branch is considered
+    closed when all its heads are closed and consequently is not
+    listed by :hg:`branches`.
+
+    Closed heads can be re-opened by committing a new changeset as the
+    child of the changeset that marks a head as closed.
+
+Head, repository
+    A topological head which has not been closed.
+
+Head, topological
+    A changeset with no children in the repository.
+
+History, immutable
+    Once committed, changesets cannot be altered.  Extensions which
+    appear to change history actually create new changesets that
+    replace existing ones, and then destroy the old changesets. Doing
+    so in public repositories can result in old changesets being
+    reintroduced to the repository.
+
+History, rewriting
+    The changesets in a repository are immutable. However, extensions
+    to Mercurial can be used to alter the repository, usually in such
+    a way as to preserve changeset contents.
+
+Immutable history
+    See 'History, immutable'.
+
+Manifest
+    Each changeset has a manifest, which is the list of files that are
+    tracked by the changeset.
+
+Merge changeset
+    See 'Changeset, merge'.
+
+Merge
+    Used to bring together divergent branches of work. When you update
+    to a changeset and then merge another changeset, you bring the
+    history of the latter changeset into your working directory. Once
+    conflicts are resolved (and marked), this merge may be committed
+    as a merge changeset, bringing two branches together in the DAG.
+
+Named branch
+    See 'Branch, named'.
+
+Null changeset
+    The empty changeset. It is the parent state of newly-initialized
+    repositories and repositories with no checked out revision. It is
+    thus the parent of root changesets and the effective ancestor when
+    merging unrelated changesets. Can be specified by the alias 'null'
+    or by the changeset ID '000000000000'.
+
+Parent
+    See 'Changeset, parent'.
+
+Parent changeset
+    See 'Changeset, parent'.
+
+Parent, working directory
+    The working directory parent reflects a virtual revision which is
+    the child of the changeset (or two changesets with an uncommitted
+    merge) shown by :hg:`parents`. This is changed with
+    :hg:`update`. Other commands to see the working directory parent
+    are :hg:`summary` and :hg:`id`. Can be specified by the alias ".".
+
+Patch
+    (Noun) The product of a diff operation.
+
+    Example: "I've sent you my patch."
+
+    (Verb) The process of using a patch file to transform one
+    changeset into another.
+
+    Example: "You will need to patch that revision."
+
+Phase
+    A per-changeset state tracking how the changeset has been or
+    should be shared. See :hg:`help phases`.
+
+Public
+    Changesets in the public phase have been shared with publishing
+    repositories and are therefore considered immutable. See :hg:`help
+    phases`.
+
+Pull
+    An operation in which changesets in a remote repository which are
+    not in the local repository are brought into the local
+    repository. Note that this operation without special arguments
+    only updates the repository, it does not update the files in the
+    working directory. See :hg:`help pull`.
+
+Push
+    An operation in which changesets in a local repository which are
+    not in a remote repository are sent to the remote repository. Note
+    that this operation only adds changesets which have been committed
+    locally to the remote repository. Uncommitted changes are not
+    sent. See :hg:`help push`.
+
+Repository
+    The metadata describing all recorded states of a collection of
+    files. Each recorded state is represented by a changeset. A
+    repository is usually (but not always) found in the ``.hg``
+    subdirectory of a working directory. Any recorded state can be
+    recreated by "updating" a working directory to a specific
+    changeset.
+
+Repository head
+    See 'Head, repository'.
+
+Revision
+    A state of the repository at some point in time. Earlier revisions
+    can be updated to by using :hg:`update`. See also 'Revision number'
+    and 'Changeset'.
+
+Revision number
+    This integer uniquely identifies a changeset in a specific
+    repository. It represents the order in which changesets were added
+    to a repository, starting with revision number 0. Note that the
+    revision number may be different in each clone of a repository. To
+    identify changesets uniquely between different clones, see
+    'Changeset id'.
+
+Revlog
+    History storage mechanism used by Mercurial. It is a form of delta
+    encoding, with an occasional full snapshot of data followed by a
+    delta for each successive revision. It includes data and an index
+    pointing to the data.
+
+Rewriting history
+    See 'History, rewriting'.
+
+Root
+    A changeset that has only the null changeset as its parent. Most
+    repositories have only a single root changeset.
+
+Secret
+    Changesets in the secret phase may not be shared via push, pull,
+    or clone. See :hg:`help phases`.
+
+Tag
+    An alternative name given to a changeset. Tags can be used in all
+    places where Mercurial expects a changeset ID, e.g., with
+    :hg:`update`. The creation of a tag is stored in the history and
+    will thus automatically be shared with others using push and pull.
+
+Tip
+    The changeset with the highest revision number. It is the changeset
+    most recently added to a repository.
+
+Tip, branch
+    The head of a given branch with the highest revision number. When
+    a branch name is used as a revision identifier, it refers to the
+    branch tip. See also 'Branch, head'. Note that because revision
+    numbers may be different in different repository clones, the
+    branch tip may be different in different cloned repositories.
+
+Update
+    (Noun) Another synonym of changeset.
+
+    Example: "I've pushed an update."
+
+    (Verb) This term is usually used to describe updating the state of
+    the working directory to that of a specific changeset. See
+    :hg:`help update`.
+
+    Example: "You should update."
+
+Working directory
+    See 'Directory, working'.
+
+Working directory parent
+    See 'Parent, working directory'.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/hg-ssh.8.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,71 @@
+========
+ hg-ssh
+========
+
+----------------------------------------
+restricted ssh login shell for Mercurial
+----------------------------------------
+
+:Author:         Thomas Arendsen Hein <thomas@intevation.de>
+:Organization:   Mercurial
+:Manual section: 8
+:Manual group:   Mercurial Manual
+
+.. contents::
+   :backlinks: top
+   :class: htmlonly
+   :depth: 1
+
+Synopsis
+""""""""
+**hg-ssh** repositories...
+
+Description
+"""""""""""
+**hg-ssh** is a wrapper for ssh access to a limited set of Mercurial
+repositories.
+
+To be used in ``~/.ssh/authorized_keys`` with the "command" option, see
+sshd(8)::
+
+  command="hg-ssh path/to/repo1 /path/to/repo2 ~/repo3 ~user/repo4" ssh-dss ...
+
+(probably together with these other useful options:
+``no-port-forwarding,no-X11-forwarding,no-agent-forwarding``)
+
+This allows pull/push over ssh from/to the repositories given as arguments.
+
+If all your repositories are subdirectories of a common directory, you can
+allow shorter paths with::
+
+  command="cd path/to/my/repositories && hg-ssh repo1 subdir/repo2"
+
+You can use pattern matching of your normal shell, e.g.::
+
+  command="cd repos && hg-ssh user/thomas/* projects/{mercurial,foo}"
+
+You can also add a ``--read-only`` flag to allow read-only access to a key,
+e.g.::
+
+  command="hg-ssh --read-only repos/\*"
+
+Bugs
+""""
+Probably lots, please post them to the mailing list (see Resources_
+below) when you find them.
+
+See Also
+""""""""
+|hg(1)|_
+
+Author
+""""""
+Written by Matt Mackall <mpm@selenic.com>
+
+Resources
+"""""""""
+Main Web Site: https://mercurial-scm.org/
+
+Source code repository: https://www.mercurial-scm.org/repo/hg
+
+Mailing list: https://www.mercurial-scm.org/mailman/listinfo/mercurial/
+
+Copying
+"""""""
+Copyright (C) 2005-2016 Matt Mackall.
+Free use of this software is granted under the terms of the GNU General
+Public License version 2 or any later version.
+
+.. include:: common.txt
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/hg.1.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,119 @@
+====
+ hg
+====
+
+---------------------------------------
+Mercurial source code management system
+---------------------------------------
+
+:Author:         Matt Mackall <mpm@selenic.com>
+:Organization:   Mercurial
+:Manual section: 1
+:Manual group:   Mercurial Manual
+
+.. contents::
+   :backlinks: top
+   :class: htmlonly
+   :depth: 1
+
+
+Synopsis
+""""""""
+**hg** *command* [*option*]... [*argument*]...
+
+Description
+"""""""""""
+The **hg** command provides a command line interface to the Mercurial
+system.
+
+Command Elements
+""""""""""""""""
+
+files...
+    indicates one or more filenames or relative paths; see
+    `File Name Patterns`_ for information on pattern matching
+
+path
+    indicates a path on the local machine
+
+revision
+    indicates a changeset which can be specified as a changeset
+    revision number, a tag, or a unique substring of the changeset
+    hash value
+
+repository path
+    either the pathname of a local repository or the URI of a remote
+    repository.
+
+.. include:: hg.1.gendoc.txt
+
+Files
+"""""
+
+``/etc/mercurial/hgrc``, ``$HOME/.hgrc``, ``.hg/hgrc``
+    This file contains defaults and configuration. Values in
+    ``.hg/hgrc`` override those in ``$HOME/.hgrc``, and these override
+    settings made in the global ``/etc/mercurial/hgrc`` configuration.
+    See |hgrc(5)|_ for details of the contents and format of these
+    files.
+
+``.hgignore``
+    This file contains regular expressions (one per line) that
+    describe file names that should be ignored by **hg**. For details,
+    see |hgignore(5)|_.
+
+``.hgsub``
+    This file defines the locations of all subrepositories, and
+    tells where the subrepository checkouts came from. For details, see
+    :hg:`help subrepos`.
+
+``.hgsubstate``
+    This file is where Mercurial stores all nested repository states. *NB: This
+    file should not be edited manually.*
+
+``.hgtags``
+    This file contains changeset hash values and text tag names (one
+    of each separated by spaces) that correspond to tagged versions of
+    the repository contents. The file content is encoded using UTF-8.
+
+``.hg/last-message.txt``
+    This file is used by :hg:`commit` to store a backup of the commit message
+    in case the commit fails.
+
+``.hg/localtags``
+    This file can be used to define local tags which are not shared among
+    repositories. The file format is the same as for ``.hgtags``, but it is
+    encoded using the local system encoding.
+
+Some commands (e.g. revert) produce backup files ending in ``.orig``.
+If the ``.orig`` file already exists and is not tracked by Mercurial,
+it will be overwritten.
+
+Bugs
+""""
+Probably lots, please post them to the mailing list (see Resources_
+below) when you find them.
+
+See Also
+""""""""
+|hgignore(5)|_, |hgrc(5)|_
+
+Author
+""""""
+Written by Matt Mackall <mpm@selenic.com>
+
+Resources
+"""""""""
+Main Web Site: https://mercurial-scm.org/
+
+Source code repository: https://www.mercurial-scm.org/repo/hg
+
+Mailing list: https://www.mercurial-scm.org/mailman/listinfo/mercurial/
+
+Copying
+"""""""
+Copyright (C) 2005-2019 Matt Mackall.
+Free use of this software is granted under the terms of the GNU General
+Public License version 2 or any later version.
+
+.. include:: common.txt
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/hgignore.5.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,34 @@
+==========
+ hgignore
+==========
+
+---------------------------------
+syntax for Mercurial ignore files
+---------------------------------
+
+:Author:         Vadim Gelfer <vadim.gelfer@gmail.com>
+:Organization:   Mercurial
+:Manual section: 5
+:Manual group:   Mercurial Manual
+
+.. include:: hgignore.5.gendoc.txt
+
+Author
+======
+Vadim Gelfer <vadim.gelfer@gmail.com>
+
+Mercurial was written by Matt Mackall <mpm@selenic.com>.
+
+See Also
+========
+|hg(1)|_, |hgrc(5)|_
+
+Copying
+=======
+This manual page is copyright 2006 Vadim Gelfer.
+Mercurial is copyright 2005-2019 Matt Mackall.
+Free use of this software is granted under the terms of the GNU General
+Public License version 2 or any later version.
+
+.. include:: common.txt
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/hgignore.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,97 @@
+Synopsis
+========
+
+The Mercurial system uses a file called ``.hgignore`` in the root
+directory of a repository to control its behavior when it searches
+for files that it is not currently tracking.
+
+Description
+===========
+
+The working directory of a Mercurial repository will often contain
+files that should not be tracked by Mercurial. These include backup
+files created by editors and build products created by compilers.
+These files can be ignored by listing them in a ``.hgignore`` file in
+the root of the working directory. The ``.hgignore`` file must be
+created manually. It is typically put under version control, so that
+the settings will propagate to other repositories with push and pull.
+
+An untracked file is ignored if its path relative to the repository
+root directory, or any prefix path of that path, is matched against
+any pattern in ``.hgignore``.
+
+For example, say we have an untracked file, ``file.c``, at
+``a/b/file.c`` inside our repository. Mercurial will ignore ``file.c``
+if any pattern in ``.hgignore`` matches ``a/b/file.c``, ``a/b`` or ``a``.
+
+In addition, a Mercurial configuration file can reference a set of
+per-user or global ignore files. See the ``ignore`` configuration
+key on the ``[ui]`` section of :hg:`help config` for details of how to
+configure these files.
+
+To control Mercurial's handling of files that it manages, many
+commands support the ``-I`` and ``-X`` options; see
+:hg:`help <command>` and :hg:`help patterns` for details.
+
+Files that are already tracked are not affected by .hgignore, even
+if they appear in .hgignore. An untracked file X can be explicitly
+added with :hg:`add X`, even if X would be excluded by a pattern
+in .hgignore.
+
+Syntax
+======
+
+An ignore file is a plain text file consisting of a list of patterns,
+with one pattern per line. Empty lines are skipped. The ``#``
+character is treated as a comment character, and the ``\`` character
+is treated as an escape character.
+
+Mercurial supports several pattern syntaxes. The default syntax used
+is Python/Perl-style regular expressions.
+
+To change the syntax used, use a line of the following form::
+
+  syntax: NAME
+
+where ``NAME`` is one of the following:
+
+``regexp``
+  Regular expression, Python/Perl syntax.
+``glob``
+  Shell-style glob.
+``rootglob``
+  A variant of ``glob`` that is rooted (see below).
+
+The chosen syntax stays in effect when parsing all patterns that
+follow, until another syntax is selected.
+
+Neither ``glob`` nor regexp patterns are rooted. A glob-syntax
+pattern of the form ``*.c`` will match a file ending in ``.c`` in any
+directory, and a regexp pattern of the form ``\.c$`` will do the
+same. To root a regexp pattern, start it with ``^``. To get the same
+effect with glob-syntax, you have to use ``rootglob``.
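+
+For example, assuming a repository that contains both a top-level
+``build/`` directory and a nested ``src/build/`` directory (hypothetical
+names, for illustration), the two syntaxes behave differently::
+
+  syntax: glob
+  build        # ignores both build/ and src/build/
+
+  syntax: rootglob
+  build        # ignores only the top-level build/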
+
+Subdirectories can have their own .hgignore settings by adding
+``subinclude:path/to/subdir/.hgignore`` to the root ``.hgignore``. See
+:hg:`help patterns` for details on ``subinclude:`` and ``include:``.
+
+.. note::
+
+  Patterns specified in files other than ``.hgignore`` are always rooted.
+  Please see :hg:`help patterns` for details.
+
+Example
+=======
+
+Here is an example ignore file. ::
+
+  # use glob syntax.
+  syntax: glob
+
+  *.elc
+  *.pyc
+  *~
+
+  # switch to regexp syntax.
+  syntax: regexp
+  ^\.pc/
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/hgrc.5.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,41 @@
+======
+ hgrc
+======
+
+---------------------------------
+configuration files for Mercurial
+---------------------------------
+
+:Author:         Bryan O'Sullivan <bos@serpentine.com>
+:Organization:   Mercurial
+:Manual section: 5
+:Manual group:   Mercurial Manual
+
+.. contents::
+   :backlinks: top
+   :class: htmlonly
+
+
+Description
+===========
+
+.. include:: hgrc.5.gendoc.txt
+
+Author
+======
+Bryan O'Sullivan <bos@serpentine.com>.
+
+Mercurial was written by Matt Mackall <mpm@selenic.com>.
+
+See Also
+========
+|hg(1)|_, |hgignore(5)|_
+
+Copying
+=======
+This manual page is copyright 2005 Bryan O'Sullivan.
+Mercurial is copyright 2005-2019 Matt Mackall.
+Free use of this software is granted under the terms of the GNU General
+Public License version 2 or any later version.
+
+.. include:: common.txt
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/hgweb.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,86 @@
+Mercurial's internal web server, hgweb, can serve either a single
+repository, or a tree of repositories. In the second case, repository
+paths and global options can be defined using a dedicated
+configuration file common to :hg:`serve`, ``hgweb.wsgi``,
+``hgweb.cgi`` and ``hgweb.fcgi``.
+
+This file uses the same syntax as other Mercurial configuration files
+but recognizes only the following sections:
+
+  - web
+  - paths
+  - collections
+
+The ``web`` options are thoroughly described in :hg:`help config`.
+
+The ``paths`` section maps URL paths to paths of repositories in the
+filesystem. hgweb will not expose the filesystem directly - only
+Mercurial repositories can be published and only according to the
+configuration.
+
+The left hand side is the path in the URL. Note that hgweb reserves
+subpaths like ``rev`` or ``file``; try using different names for
+nested repositories to avoid confusing effects.
+
+The right hand side is the path in the filesystem. If the specified
+path ends with ``*`` or ``**`` the filesystem will be searched
+recursively for repositories below that point.
+With ``*`` it will not recurse into the repositories it finds (except for
+``.hg/patches``).
+With ``**`` it will also search inside repository working directories
+and possibly find subrepositories.
+
+In this example::
+
+  [paths]
+  /projects/a = /srv/tmprepos/a
+  /projects/b = c:/repos/b
+  / = /srv/repos/*
+  /user/bob = /home/bob/repos/**
+
+- The first two entries make two repositories in different directories
+  appear under the same directory in the web interface
+- The third entry will publish every Mercurial repository found in
+  ``/srv/repos/``, for instance the repository ``/srv/repos/quux/``
+  will appear as ``http://server/quux/``
+- The fourth entry will publish both ``http://server/user/bob/quux/``
+  and ``http://server/user/bob/quux/testsubrepo/``
+
+The ``collections`` section is deprecated and has been superseded by
+``paths``.
+
+URLs and Common Arguments
+=========================
+
+URLs under each repository have the form ``/{command}[/{arguments}]``
+where ``{command}`` represents the name of a command or handler and
+``{arguments}`` represents any number of additional URL parameters
+to that command.
+
+The web server has a default style associated with it. Styles map to
+a collection of named templates. Each template is used to render a
+specific piece of data, such as a changeset or diff.
+
+The style for the current request can be overridden two ways. First,
+if ``{command}`` contains a hyphen (``-``), the text before the hyphen
+defines the style. For example, ``/atom-log`` will render the ``log``
+command handler with the ``atom`` style. The second way to set the
+style is with the ``style`` query string argument. For example,
+``/log?style=atom``. The hyphenated URL parameter is preferred.
+
+Not all templates are available for all styles. Attempting to use
+a style that doesn't have all templates defined may result in an error
+rendering the page.
+
+Many commands take a ``{revision}`` URL parameter. This defines the
+changeset to operate on. This is commonly specified as the short,
+12 digit hexadecimal abbreviation for the full 40 character unique
+revision identifier. However, any value described by
+:hg:`help revisions` typically works.
+
+Commands and URLs
+=================
+
+The following web commands and their URLs are available:
+
+  .. webcommandsmarker
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/bundle2.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,677 @@
+Bundle2 refers to a data format that is used for both on-disk storage
+and over-the-wire transfer of repository data and state.
+
+The data format allows the capture of multiple components of
+repository data. Contrast with the initial bundle format, which
+only captured *changegroup* data (and couldn't store bookmarks,
+phases, etc).
+
+Bundle2 is used for:
+
+* Transferring data from a repository (e.g. as part of an ``hg clone``
+  or ``hg pull`` operation).
+* Transferring data to a repository (e.g. as part of an ``hg push``
+  operation).
+* Storing data on disk (e.g. the result of an ``hg bundle``
+  operation).
+* Transferring the results of a repository operation (e.g. the
+  reply to an ``hg push`` operation).
+
+At its highest level, a bundle2 payload is a stream that begins
+with some metadata and consists of a series of *parts*, with each
+part describing repository data or state or the result of an
+operation. New bundle2 parts are introduced over time when there is
+a need to capture a new form of data. A *capabilities* mechanism
+exists to allow peers to understand which bundle2 parts the other
+understands.
+
+Stream Format
+=============
+
+A bundle2 payload consists of a magic string (``HG20``) followed by
+stream level parameters, followed by any number of payload *parts*.
+
+It may help to think of the stream level parameters as *headers* and the
+payload parts as the *body*.
+
+Stream Level Parameters
+-----------------------
+
+Following the magic string is data that defines parameters applicable to the
+entire payload.
+
+Stream level parameters begin with a 32-bit unsigned big-endian integer.
+The value of this integer defines the number of bytes of stream level
+parameters that follow.
+
+The *N* bytes of raw data contain a space separated list of parameters.
+Each parameter consists of a required name and an optional value.
+
+Parameters have the form ``<name>`` or ``<name>=<value>``.
+
+Both the parameter name and value are URL quoted.
+
+Names MUST start with a letter. If the first letter is lower case, the
+parameter is advisory and can safely be ignored. If the first letter
+is upper case, the parameter is mandatory and the handler MUST stop if
+it is unable to process it.
+
+Stream level parameters apply to the entire bundle2 payload. Lower-level
+options should go into a bundle2 part instead.
+
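+As a minimal sketch of decoding this layout (an illustration, not
+Mercurial's actual implementation), assuming ``fh`` is a binary stream
+positioned just past the ``HG20`` magic::
+
+  import struct
+  from urllib.parse import unquote
+
+  def read_stream_params(fh):
+      # 32-bit unsigned big-endian count of parameter bytes.
+      (size,) = struct.unpack('>I', fh.read(4))
+      blob = fh.read(size).decode('ascii')
+      params = {}
+      items = blob.split(' ') if blob else []
+      for item in items:
+          key, sep, value = item.partition('=')
+          # Names and values are URL quoted; the value is optional.
+          params[unquote(key)] = unquote(value) if sep else None
+      return params
+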
+The following stream level parameters are defined:
+
+Compression
+   Compression format of payload data. ``GZ`` denotes zlib. ``BZ``
+   denotes bzip2. ``ZS`` denotes zstandard.
+
+   When defined, all bytes after the stream level parameters are
+   compressed using the compression format defined by this parameter.
+
+   If this parameter isn't present, data is raw/uncompressed.
+
+   This parameter MUST be mandatory because attempting to consume
+   streams without knowing how to decode the underlying bytes will
+   result in errors.
+
+Payload Part
+------------
+
+Following the stream level parameters are 0 or more payload parts. Each
+payload part consists of a header and a body.
+
+The payload part header consists of a 32-bit unsigned big-endian integer
+defining the number of bytes in the header that follow. The special
+value ``0`` indicates the end of the bundle2 stream.
+
+The binary format of the part header is as follows:
+
+* 8-bit unsigned size of the part name
+* N-bytes alphanumeric part name
+* 32-bit unsigned big-endian part ID
+* N bytes part parameter data
+
+The *part name* identifies the type of the part. A part name with an
+UPPERCASE letter is mandatory. Otherwise, the part is advisory. A
+consumer should abort if it encounters a mandatory part it doesn't know
+how to process. See the sections below for each defined part type.
+
+The *part ID* is a unique identifier within the bundle used to refer to a
+specific part. It should be unique within the bundle2 payload.
+
+Part parameter data consists of:
+
+* 1 byte number of mandatory parameters
+* 1 byte number of advisory parameters
+* 2 * N bytes of sizes of parameter key and values
+* N * M blobs of values for parameter key and values
+
+Following the 2 bytes of mandatory and advisory parameter counts are
+2-tuples of bytes of the sizes of each parameter. e.g.
+(<key size>, <value size>).
+
+Following that are the raw values, without padding. Mandatory parameters
+come first, followed by advisory parameters.
+
+Each parameter's key MUST be unique within the part.
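+
+A minimal sketch of decoding the parameter block described above
+(``read_part_params`` is a hypothetical helper, not Mercurial's actual
+code; ``data`` is the raw part parameter bytes)::
+
+  def read_part_params(data):
+      # One byte each: counts of mandatory and advisory parameters.
+      nmand, nadv = data[0], data[1]
+      total = nmand + nadv
+      # Two bytes per parameter: (key size, value size).
+      sizes = [(data[2 + 2 * i], data[3 + 2 * i]) for i in range(total)]
+      offset = 2 + 2 * total
+      params = []
+      for ksize, vsize in sizes:
+          key = data[offset:offset + ksize]
+          offset += ksize
+          value = data[offset:offset + vsize]
+          offset += vsize
+          params.append((key, value))
+      # Mandatory parameters come first, then advisory ones.
+      return params[:nmand], params[nmand:]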
+
+Following the part parameter data is the part payload. The part payload
+consists of a series of framed chunks. The frame header is a 32-bit
+big-endian integer defining the size of the chunk. The N bytes of raw
+payload data follows.
+
+The part payload consists of 0 or more chunks.
+
+A chunk with size ``0`` denotes the end of the part payload. Therefore,
+there will always be at least 1 32-bit integer following the payload
+part header.
+
+A chunk size of ``-1`` is used to signal an *interrupt*. If such a chunk
+size is seen, the stream processor should process the next bytes as a new
+payload part. After this payload part, processing of the original,
+interrupted part should resume.
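+
+A sketch of reading one part's framed payload chunks (illustrative
+only; ``fh`` is assumed to be positioned at the first frame header)::
+
+  import struct
+
+  def iter_part_chunks(fh):
+      while True:
+          # 32-bit big-endian, read as signed so -1 can signal an
+          # interrupt.
+          (size,) = struct.unpack('>i', fh.read(4))
+          if size == 0:
+              return            # end of the part payload
+          if size == -1:
+              # An interrupting part follows; a real reader would
+              # process it, then resume this part.
+              raise NotImplementedError('interrupt part not handled')
+          yield fh.read(size)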
+
+Capabilities
+============
+
+Bundle2 is a dynamic format that can evolve over time. For example,
+when a new repository data concept is invented, a new bundle2 part
+is typically invented to hold that data. In addition, parts performing
+similar functionality may come into existence if there is a better
+mechanism for performing certain functionality.
+
+Because the bundle2 format evolves over time, peers need to understand
+what bundle2 features the other can understand. The *capabilities*
+mechanism is how those features are expressed.
+
+Bundle2 capabilities are logically expressed as a dictionary where the
+keys are strings and the values are lists of strings.
+
+Capabilities are encoded for exchange between peers. The encoded
+capabilities blob consists of a newline (``\n``) delimited list of
+entries. Each entry has the form ``<key>`` or ``<key>=<value>``,
+depending on whether the capability has a value.
+
+The capability name is URL quoted (``%XX`` encoding of URL unsafe
+characters).
+
+The value, if present, is formed by URL quoting each value in
+the capability list and concatenating the result with a comma (``,``).
+
+For example, consider the capability ``novaluekey`` (no value) and the
+capability ``listvaluekey`` with values ``value 1`` and ``value 2``.
+These would be encoded as::
+
+   listvaluekey=value%201,value%202\nnovaluekey
+
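+A rough sketch of producing that encoding (``encode_caps`` is a
+hypothetical helper)::
+
+  from urllib.parse import quote
+
+  def encode_caps(caps):
+      # caps: dict mapping a capability name to a (possibly empty)
+      # list of string values.
+      entries = []
+      for key, values in sorted(caps.items()):
+          if values:
+              entries.append('%s=%s' % (
+                  quote(key), ','.join(quote(v) for v in values)))
+          else:
+              entries.append(quote(key))
+      return '\n'.join(entries)
+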
+The sections below detail the defined bundle2 capabilities.
+
+HG20
+----
+
+Denotes that the peer supports the bundle2 data format.
+
+bookmarks
+---------
+
+Denotes that the peer supports the ``bookmarks`` part.
+
+Peers should not issue mandatory ``bookmarks`` parts unless this
+capability is present.
+
+changegroup
+-----------
+
+Denotes which versions of the *changegroup* format the peer can
+receive. Values include ``01``, ``02``, and ``03``.
+
+The peer should not generate changegroup data for a version not
+specified by this capability.
+
+checkheads
+----------
+
+Denotes which forms of heads checking the peer supports.
+
+If ``related`` is in the value, then the peer supports the ``check:heads``
+part and the peer is capable of detecting race conditions when applying
+changelog data.
+
+digests
+-------
+
+Denotes which hashing formats the peer supports.
+
+Values are names of hashing functions. Values include ``md5``, ``sha1``,
+and ``sha512``.
+
+error
+-----
+
+Denotes which ``error:`` parts the peer supports.
+
+Value is a list of strings of ``error:`` part names. Valid values
+include ``abort``, ``unsupportedcontent``, ``pushraced``, and ``pushkey``.
+
+Peers should not issue an ``error:`` part unless the type of that
+part is listed as supported by this capability.
+
+listkeys
+--------
+
+Denotes that the peer supports the ``listkeys`` part.
+
+hgtagsfnodes
+------------
+
+Denotes that the peer supports the ``hgtagsfnodes`` part.
+
+obsmarkers
+----------
+
+Denotes that the peer supports the ``obsmarkers`` part and which versions
+of the obsolescence data format it can receive. Values are strings like
+``V<N>``. e.g. ``V1``.
+
+phases
+------
+
+Denotes that the peer supports the ``phases`` part.
+
+pushback
+--------
+
+Denotes that the peer supports sending/receiving bundle2 data in response
+to a bundle2 request.
+
+This capability is typically used by servers that employ server-side
+rewriting of pushed repository data. For example, a server may wish to
+automatically rebase pushed changesets. When this capability is present,
+the server can send a bundle2 response containing the rewritten changeset
+data and the client will apply it.
+
+pushkey
+-------
+
+Denotes that the peer supports the ``pushkey`` part.
+
+remote-changegroup
+------------------
+
+Denotes that the peer supports the ``remote-changegroup`` part and
+which protocols it can use to fetch remote changegroup data.
+
+Values are protocol names. e.g. ``http`` and ``https``.
+
+stream
+------
+
+Denotes that the peer supports ``stream*`` parts in order to support
+*stream clone*.
+
+Values are which ``stream*`` parts the peer supports. ``v2`` denotes
+support for the ``stream2`` part.
+
+Bundle2 Part Types
+==================
+
+The sections below detail the various bundle2 part types.
+
+bookmarks
+---------
+
+The ``bookmarks`` part holds bookmarks information.
+
+This part has no parameters.
+
+The payload consists of entries defining bookmarks. Each entry consists of:
+
+* 20 bytes binary changeset node.
+* 2 bytes big endian short defining bookmark name length.
+* N bytes defining bookmark name.
+
+Receivers typically update bookmarks to match the state specified in
+this part.
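+
+A minimal decoding sketch for this payload layout (illustrative only,
+not the actual implementation)::
+
+  import struct
+
+  def iter_bookmarks(payload):
+      offset = 0
+      while offset < len(payload):
+          # 20-byte binary changeset node.
+          node = payload[offset:offset + 20]
+          # 2-byte big-endian name length.
+          (namelen,) = struct.unpack(
+              '>H', payload[offset + 20:offset + 22])
+          name = payload[offset + 22:offset + 22 + namelen]
+          offset += 22 + namelen
+          yield name, node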
+
+changegroup
+-----------
+
+The ``changegroup`` part contains *changegroup* data (changelog, manifestlog,
+and filelog revision data).
+
+The following part parameters are defined for this part.
+
+version
+   Changegroup version string. e.g. ``01``, ``02``, and ``03``. This parameter
+   determines how to interpret the changegroup data within the part.
+
+nbchanges
+   The number of changesets in this changegroup. This parameter can be used
+   to aid in the display of progress bars, etc during part application.
+
+treemanifest
+   Whether the changegroup contains tree manifests.
+
+targetphase
+   The target phase of changesets in this part. Value is an integer of
+   the target phase.
+
+The payload of this part is raw changegroup data. See
+:hg:`help internals.changegroups` for the format of changegroup data.
+
+check:bookmarks
+---------------
+
+The ``check:bookmarks`` part is inserted into a bundle as a means for the
+receiver to validate that the sender's known state of bookmarks matches
+the receiver's.
+
+This part has no parameters.
+
+The payload is a binary stream of bookmark data. Each entry in the stream
+consists of:
+
+* 20 bytes binary node that bookmark is associated with
+* 2 bytes unsigned short defining length of bookmark name
+* N bytes containing the bookmark name
+
+If all bits in the node value are ``1``, then this signifies a missing
+bookmark.
+
+When the receiver encounters this part, for each bookmark in the part
+payload, it should validate that the current bookmark state matches
+the specified state. If it doesn't, then the receiver should take
+appropriate action. (In the case of pushes, this mismatch signifies
+a race condition and the receiver should consider rejecting the push.)
+
+check:heads
+-----------
+
+The ``check:heads`` part is a means to validate that the sender's state
+of DAG heads matches the receiver's.
+
+This part has no parameters.
+
+The body of this part is an array of 20 byte binary nodes representing
+changeset heads.
+
+Receivers should compare the set of heads defined in this part to the
+current set of repo heads and take action if there is a mismatch in that
+set.
+
+Note that this part applies to *all* heads in the repo.
+
+check:phases
+------------
+
+The ``check:phases`` part validates that the sender's state of phase
+boundaries matches the receiver's.
+
+This part has no parameters.
+
+The payload consists of an array of 24 byte entries. Each entry is
+a big endian 32-bit integer defining the phase integer and 20 byte
+binary node value.
+
+For each changeset defined in this part, the receiver should validate
+that its current phase matches the phase defined in this part. The
+receiver should take appropriate action if a mismatch occurs.
+
+check:updated-heads
+-------------------
+
+The ``check:updated-heads`` part validates that the sender's state of
+DAG heads updated by this bundle matches the receiver's.
+
+This type is nearly identical to ``check:heads`` except the heads
+in the payload are only a subset of heads in the repository. The
+receiver should validate that all nodes specified by the sender are
+branch heads and take appropriate action if not.
+
+error:abort
+-----------
+
+The ``error:abort`` part conveys a fatal error.
+
+The following part parameters are defined:
+
+message
+   The string content of the error message.
+
+hint
+   Supplemental string giving a hint on how to fix the problem.
+
+error:pushkey
+-------------
+
+The ``error:pushkey`` part conveys an error in the *pushkey* protocol.
+
+The following part parameters are defined:
+
+namespace
+   The pushkey domain that exhibited the error.
+
+key
+   The key whose update failed.
+
+new
+   The value we tried to set the key to.
+
+old
+   The old value of the key (as supplied by the client).
+
+ret
+   The integer result code for the pushkey request.
+
+in-reply-to
+   Part ID that triggered this error.
+
+This part is generated if there was an error applying *pushkey* data.
+Pushkey data includes bookmarks, phases, and obsolescence markers.
+
+error:pushraced
+---------------
+
+The ``error:pushraced`` part conveys that an error occurred and
+the likely cause is losing a race with another pusher.
+
+The following part parameters are defined:
+
+message
+   String error message.
+
+This part is typically emitted when a receiver examining ``check:*``
+parts encountered inconsistency between incoming state and local state.
+The likely cause of that inconsistency is another repository change
+operation (often another client performing an ``hg push``).
+
+error:unsupportedcontent
+------------------------
+
+The ``error:unsupportedcontent`` part conveys that a bundle2 receiver
+encountered a part or content it was not able to handle.
+
+The following part parameters are defined:
+
+parttype
+   The name of the part that triggered this error.
+
+params
+   ``\0`` delimited list of parameters.
+
+hgtagsfnodes
+------------
+
+The ``hgtagsfnodes`` type defines file nodes for the ``.hgtags`` file
+for various changesets.
+
+This part has no parameters.
+
+The payload is an array of pairs of 20 byte binary nodes. The first node
+is a changeset node. The second node is the ``.hgtags`` file node.
+
+Resolving tags requires resolving the ``.hgtags`` file node for changesets.
+On large repositories, this can be expensive. Repositories cache the
+mapping of changeset to ``.hgtags`` file node on disk as a performance
+optimization. This part allows that cached data to be transferred alongside
+changeset data.
+
+Receivers should update their ``.hgtags`` cache file node mappings with
+the incoming data.
+
+listkeys
+--------
+
+The ``listkeys`` part holds content for a *pushkey* namespace.
+
+The following part parameters are defined:
+
+namespace
+   The pushkey domain this data belongs to.
+
+The part payload contains a newline (``\n``) delimited list of
+tab (``\t``) delimited key-value pairs defining entries in this pushkey
+namespace.
+
+obsmarkers
+----------
+
+The ``obsmarkers`` part defines obsolescence markers.
+
+This part has no parameters.
+
+The payload consists of obsolescence markers using the on-disk markers
+format. The first byte defines the version format.
+
+The receiver should apply the obsolescence markers defined in this
+part. A ``reply:obsmarkers`` part should be sent to the sender, if possible.
+
+output
+------
+
+The ``output`` part is used to display output on the receiver.
+
+This part has no parameters.
+
+The payload consists of raw data to be printed on the receiver.
+
+phase-heads
+-----------
+
+The ``phase-heads`` part defines phase boundaries.
+
+This part has no parameters.
+
+The payload consists of an array of 24 byte entries. Each entry is
+a big endian 32-bit integer defining the phase integer and 20 byte
+binary node value.
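+
+Decoding these fixed-size entries is straightforward; as a sketch
+(illustrative, not the real implementation)::
+
+  import struct
+
+  def parse_phase_heads(payload):
+      for offset in range(0, len(payload), 24):
+          (phase,) = struct.unpack('>I', payload[offset:offset + 4])
+          node = payload[offset + 4:offset + 24]   # 20-byte binary node
+          yield phase, node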
+
+pushkey
+-------
+
+The ``pushkey`` part communicates an intent to perform a ``pushkey``
+request.
+
+The following part parameters are defined:
+
+namespace
+   The pushkey domain to operate on.
+
+key
+   The key within the pushkey namespace that is being changed.
+
+old
+   The old value for the key being changed.
+
+new
+   The new value for the key being changed.
+
+This part has no payload.
+
+The receiver should perform a pushkey operation as described by this
+part's parameters.
+
+If the pushkey operation fails, a ``reply:pushkey`` part should be sent
+back to the sender, if possible. The ``in-reply-to`` part parameter
+should reference the source part.
+
+pushvars
+--------
+
+The ``pushvars`` part defines environment variables that should be
+set when processing this bundle2 payload.
+
+The part's advisory parameters define environment variables.
+
+There is no part payload.
+
+When received, part parameters are prefixed with ``USERVAR_`` and the
+resulting variables are defined in the hooks context for the current
+bundle2 application. This part provides a mechanism for senders to
+inject extra state into the hook execution environment on the receiver.
+
+remote-changegroup
+------------------
+
+The ``remote-changegroup`` part defines an external location of a bundle
+to apply. This part can be used by servers to serve pre-generated bundles
+hosted at arbitrary URLs.
+
+The following part parameters are defined:
+
+url
+   The URL of the remote bundle.
+
+size
+   The size in bytes of the remote bundle.
+
+digests
+   A space separated list of the digest types provided in additional
+   part parameters.
+
+digest:<type>
+   The hexadecimal representation of the digest (hash) of the remote bundle.
+
+There is no payload for this part type.
+
+When encountered, clients should attempt to fetch the URL being advertised
+and read and apply it as a bundle.
+
+The ``size`` and ``digest:<type>`` parameters should be used to validate
+that the downloaded bundle matches what was advertised. If a mismatch occurs,
+the client should abort.
+
+reply:changegroup
+-----------------
+
+The ``reply:changegroup`` part conveys the results of application of a
+``changegroup`` part.
+
+The following part parameters are defined:
+
+return
+   Integer return code from changegroup application.
+
+in-reply-to
+   Part ID of part this reply is in response to.
+
+reply:obsmarkers
+----------------
+
+The ``reply:obsmarkers`` part conveys the results of applying an
+``obsmarkers`` part.
+
+The following part parameters are defined:
+
+new
+   The integer number of new markers that were applied.
+
+in-reply-to
+   The part ID that this part is in reply to.
+
+reply:pushkey
+-------------
+
+The ``reply:pushkey`` part conveys the result of a *pushkey* operation.
+
+The following part parameters are defined:
+
+return
+   Integer result code from pushkey operation.
+
+in-reply-to
+   Part ID that triggered this pushkey operation.
+
+This part has no payload.
+
+replycaps
+---------
+
+The ``replycaps`` part notifies the receiver that a reply bundle should
+be created.
+
+This part has no parameters.
+
+The payload consists of a bundle2 capabilities blob.
+
+stream2
+-------
+
+The ``stream2`` part contains *streaming clone* version 2 data.
+
+The following part parameters are defined:
+
+requirements
+   URL quoted repository requirements string. Requirements are delimited by a
+   comma (``,``).
+
+filecount
+   The total number of files being transferred in the payload.
+
+bytecount
+   The total size of file content being transferred in the payload.
+
+The payload consists of raw stream clone version 2 data.
+
+The ``filecount`` and ``bytecount`` parameters can be used for progress and
+reporting purposes. The values may not be exact.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/bundles.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,93 @@
+A bundle is a container for repository data.
+
+Bundles are used as standalone files as well as the interchange format
+over the wire protocol used when two Mercurial peers communicate with
+each other.
+
+Headers
+=======
+
+Bundles produced since Mercurial 0.7 (September 2005) have a 4 byte
+header identifying the major bundle type. The header always begins with
+``HG`` and the following 2 bytes indicate the bundle type/version. Some
+bundle types have additional data after this 4 byte header.
+
+The following sections describe each bundle header/type.
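+
+As an illustration (``sniff_bundle_header`` is a hypothetical helper,
+not part of Mercurial's API), the header can be examined like this::
+
+  def sniff_bundle_header(fh):
+      magic = fh.read(4)
+      if magic in (b'HG10', b'HGS1'):
+          # Both are followed by a 2-byte compression identifier.
+          return magic, fh.read(2)    # e.g. b'BZ', b'GZ', b'UN'
+      if magic.startswith(b'HG2'):
+          return magic, None          # bundle2; parameters follow
+      raise ValueError('not a Mercurial bundle header: %r' % magic)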
+
+HG10
+----
+
+``HG10`` headers indicate a *changegroup bundle*. This is the original
+bundle format, so it is sometimes referred to as *bundle1*. It has been
+present since version 0.7 (released September 2005).
+
+This header is followed by 2 bytes indicating the compression algorithm
+used for data that follows. All subsequent data following this
+compression identifier is compressed according to the algorithm/method
+specified.
+
+Supported algorithms include the following.
+
+``BZ``
+   *bzip2* compression.
+
+   Bzip2 compressors emit a leading ``BZ`` header. Mercurial uses this
+   leading ``BZ`` as part of the bundle header. Therefore consumers
+   of bzip2 bundles need to *seed* the bzip2 decompressor with ``BZ`` or
+   seek the input stream back to the beginning of the algorithm component
+   of the bundle header so that decompressor input is valid. This behavior
+   is unique among supported compression algorithms.
+
+   Supported since version 0.7 (released September 2005).
+
+``GZ``
+   *zlib* compression.
+
+   Supported since version 0.9.2 (released December 2006).
+
+``UN``
+   *Uncompressed* or no compression. Unmodified changegroup data follows.
+
+   Supported since version 0.9.2 (released December 2006).
+
+3rd party extensions may implement their own compression. However, no
+authority reserves values for their compression algorithm identifiers.
+
+HG2X
+----
+
+``HG2X`` headers (where ``X`` is any value) denote a *bundle2* bundle.
+Bundle2 bundles are a container format for various kinds of repository
+data and capabilities, beyond changegroup data (which was the only data
+supported by ``HG10`` bundles).
+
+``HG20`` is currently the only defined bundle2 version.
+
+The ``HG20`` format is documented at :hg:`help internals.bundle2`.
+
+Initial ``HG20`` support was added in Mercurial 3.0 (released May
+2014). However, bundle2 bundles were hidden behind an experimental flag
+until version 3.5 (released August 2015), when they were enabled in the
+wire protocol. Various commands (including ``hg bundle``) did not
+support generating bundle2 files until Mercurial 3.6 (released November
+2015).
+
+HGS1
+----
+
+*Experimental*
+
+A ``HGS1`` header indicates a *streaming clone bundle*. This is a bundle
+that contains raw revlog data from a repository store. (Typically revlog
+data is exchanged in the form of changegroups.)
+
+The purpose of *streaming clone bundles* is to *clone* repository data
+very efficiently.
+
+The ``HGS1`` header is always followed by 2 bytes indicating a
+compression algorithm of the data that follows. Only ``UN``
+(uncompressed data) is currently allowed.
+
+``HGS1UN`` support was added as an experimental feature in version 3.6
+(released November 2015) as part of the initial offering of the *clone
+bundles* feature.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/cbor.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,130 @@
+Mercurial uses Concise Binary Object Representation (CBOR)
+(RFC 7049) for various data formats.
+
+This document describes the subset of CBOR that Mercurial uses and
+gives recommendations for appropriate use of CBOR within Mercurial.
+
+Type Limitations
+================
+
+Major types 0 and 1 (unsigned integers and negative integers) MUST be
+fully supported.
+
+Major type 2 (byte strings) MUST be fully supported. However, there
+are limitations around the use of indefinite-length byte strings.
+(See below.)
+
+Major type 3 (text strings) is NOT supported.
+
+Major type 4 (arrays) MUST be supported. However, values are limited
+to the set of types described in the "Container Types" section below.
+And indefinite-length arrays are NOT supported.
+
+Major type 5 (maps) MUST be supported. However, key values are limited
+to the set of types described in the "Container Types" section below.
+And indefinite-length maps are NOT supported.
+
+Major type 6 (semantic tagging of major types) can be used with the
+following semantic tag values:
+
+258
+   Mathematical finite set. Suitable for representing Python's
+   ``set`` type.
+
+All other semantic tag values are not allowed.
+
+Major type 7 (simple data types) can be used with the following
+type values:
+
+20
+   False
+21
+   True
+22
+   Null
+31
+   Break stop code (for indefinite-length items).
+
+All other simple data type values (including every value requiring the
+1 byte extension) are disallowed.
+
+Indefinite-Length Byte Strings
+==============================
+
+Indefinite-length byte strings (major type 2) are allowed. However,
+they MUST NOT occur inside a container type (such as an array or map).
+i.e. they can only occur as the "top-most" element in a stream of
+values.
+
+Encoders and decoders SHOULD *stream* indefinite-length byte strings.
+i.e. an encoder or decoder SHOULD NOT buffer the entirety of a long
+byte string value when indefinite-length byte strings are being used
+if it can be avoided. Mercurial MAY use extremely long indefinite-length
+byte strings and buffering the source or destination value COULD lead to
+memory exhaustion.
+
+Chunks in an indefinite-length byte string SHOULD NOT exceed 2^20
+bytes.
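+
+A sketch of emitting such a stream by hand (raw CBOR bytes, limited to
+the subset described in this document)::
+
+  import struct
+
+  def _bytestring_header(length):
+      # Definite-length byte string header (major type 2 is 0x40).
+      if length < 24:
+          return bytes([0x40 | length])
+      elif length < 2 ** 8:
+          return b'\x58' + struct.pack('>B', length)
+      elif length < 2 ** 16:
+          return b'\x59' + struct.pack('>H', length)
+      elif length < 2 ** 32:
+          return b'\x5a' + struct.pack('>I', length)
+      return b'\x5b' + struct.pack('>Q', length)
+
+  def encode_indefinite_bytestring(chunks):
+      yield b'\x5f'                 # indefinite-length byte string
+      for chunk in chunks:          # each chunk is definite-length
+          yield _bytestring_header(len(chunk)) + chunk
+      yield b'\xff'                 # break stop code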
+
+Container Types
+===============
+
+Mercurial may use the array (major type 4), map (major type 5), and
+set (semantic tag 258 plus major type 4 array) container types.
+
+An array may contain any supported type as values.
+
+A map MUST only use the following types as keys:
+
+* unsigned integers (major type 0)
+* negative integers (major type 1)
+* byte strings (major type 2) (but not indefinite-length byte strings)
+* false (simple type 20)
+* true (simple type 21)
+* null (simple type 22)
+
+A map MUST only use the following types as values:
+
+* all types supported as map keys
+* arrays
+* maps
+* sets
+
+A set may only use the following types as values:
+
+* all types supported as map keys
+
+It is recommended that keys in maps and values in sets and arrays all
+be of a uniform type.
+
+Avoiding Large Byte Strings
+===========================
+
+The use of large byte strings is discouraged, especially in scenarios where
+the total size of the byte string may be unbounded for some inputs (e.g. when
+representing the content of a tracked file). It is highly recommended to use
+indefinite-length byte strings for these purposes.
+
+Since indefinite-length byte strings cannot be nested within an outer
+container (such as an array or map), to associate a large byte string
+with another data structure, it is recommended to use an array or
+map followed immediately by an indefinite-length byte string. For example,
+instead of the following map::
+
+   {
+      "key1": "value1",
+      "key2": "value2",
+      "long_value": "some very large value...",
+   }
+
+Use a map followed by an indefinite-length byte string::
+
+   {
+      "key1": "value1",
+      "key2": "value2",
+      "value_follows": True,
+   }
+   <BEGIN INDEFINITE-LENGTH BYTE STRING>
+   "some very large value"
+   "..."
+   <END INDEFINITE-LENGTH BYTE STRING>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/censor.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,22 @@
+The censor system allows retroactively removing content from
+files. Actually censoring a node requires using the censor extension,
+but the functionality for handling censored nodes is partially in core.
+
+Censored nodes in a filelog have the flag ``REVIDX_ISCENSORED`` set,
+and the contents of the censored node are replaced with a censor
+tombstone. For historical reasons, the tombstone is packed in the
+filelog metadata field ``censored``. This allows censored nodes to be
+(mostly) safely transmitted through old formats like changegroup
+versions 1 and 2. When using changegroup formats older than 3, the
+receiver is required to re-add the ``REVIDX_ISCENSORED`` flag when
+storing the revision. This depends on the ``censored`` metadata key
+never being used for anything other than censoring revisions, which is
+true as of January 2017. Note that the revlog flag is the
+authoritative marker of a censored node: the tombstone should only be
+consulted when looking for a reason a node was censored or when revlog
+flags are unavailable as mentioned above.
+
+The tombstone data is a free-form string. It's expected that users of
+censor will want to record the reason for censoring a node in the
+tombstone. The tombstone must be small enough to fit within the size of
+the content being censored.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/changegroups.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,207 @@
+Changegroups are representations of repository revlog data, specifically
+the changelog data, root/flat manifest data, treemanifest data, and
+filelogs.
+
+There are 3 versions of changegroups: ``1``, ``2``, and ``3``. From a
+high-level, versions ``1`` and ``2`` are almost exactly the same, with the
+only difference being an additional item in the *delta header*. Version
+``3`` adds support for storage flags in the *delta header* and optionally
+exchanging treemanifests (enabled by setting an option on the
+``changegroup`` part in the bundle2).
+
+Changegroups when not exchanging treemanifests consist of 3 logical
+segments::
+
+   +---------------------------------+
+   |           |          |          |
+   | changeset | manifest | filelogs |
+   |           |          |          |
+   |           |          |          |
+   +---------------------------------+
+
+When exchanging treemanifests, there are 4 logical segments::
+
+   +-------------------------------------------------+
+   |           |          |               |          |
+   | changeset |   root   | treemanifests | filelogs |
+   |           | manifest |               |          |
+   |           |          |               |          |
+   +-------------------------------------------------+
+
+The principal building block of each segment is a *chunk*. A *chunk*
+is a framed piece of data::
+
+   +---------------------------------------+
+   |           |                           |
+   |  length   |           data            |
+   | (4 bytes) |   (<length - 4> bytes)    |
+   |           |                           |
+   +---------------------------------------+
+
+All integers are big-endian signed integers. Each chunk starts with a 32-bit
+integer indicating the length of the entire chunk (including the length field
+itself).
+
+There is a special case chunk that has a value of 0 for the length
+(``0x00000000``). We call this an *empty chunk*.
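+
+A sketch of walking this framing (illustrative only; ``fh`` is a binary
+stream of changegroup data)::
+
+  import struct
+
+  def iter_chunks(fh):
+      while True:
+          header = fh.read(4)
+          if len(header) < 4:
+              return
+          # Big-endian signed; the length includes the field itself.
+          (length,) = struct.unpack('>i', header)
+          if length == 0:
+              yield b''             # *empty chunk*
+          else:
+              yield fh.read(length - 4)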
+
+Delta Groups
+============
+
+A *delta group* expresses the content of a revlog as a series of deltas,
+or patches against previous revisions.
+
+Delta groups consist of 0 or more *chunks* followed by the *empty chunk*
+to signal the end of the delta group::
+
+  +------------------------------------------------------------------------+
+  |                |             |               |             |           |
+  | chunk0 length  | chunk0 data | chunk1 length | chunk1 data |    0x0    |
+  |   (4 bytes)    |  (various)  |   (4 bytes)   |  (various)  | (4 bytes) |
+  |                |             |               |             |           |
+  +------------------------------------------------------------------------+
+
+Each *chunk*'s data consists of the following::
+
+  +---------------------------------------+
+  |                        |              |
+  |     delta header       |  delta data  |
+  |  (various by version)  |  (various)   |
+  |                        |              |
+  +---------------------------------------+
+
+The *delta data* is a series of *delta*s that describe a diff from an existing
+entry (either one that the recipient already has, or one previously specified
+in the bundle/changegroup).
+
+The *delta header* is different between versions ``1``, ``2``, and
+``3`` of the changegroup format.
+
+Version 1 (headerlen=80)::
+
+   +------------------------------------------------------+
+   |            |             |             |             |
+   |    node    |   p1 node   |   p2 node   |  link node  |
+   | (20 bytes) |  (20 bytes) |  (20 bytes) |  (20 bytes) |
+   |            |             |             |             |
+   +------------------------------------------------------+
+
+Version 2 (headerlen=100)::
+
+   +------------------------------------------------------------------+
+   |            |             |             |            |            |
+   |    node    |   p1 node   |   p2 node   | base node  | link node  |
+   | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) |
+   |            |             |             |            |            |
+   +------------------------------------------------------------------+
+
+Version 3 (headerlen=102)::
+
+   +------------------------------------------------------------------------------+
+   |            |             |             |            |            |           |
+   |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |
+   | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) |
+   |            |             |             |            |            |           |
+   +------------------------------------------------------------------------------+
+
+The *delta data* consists of ``chunklen - 4 - headerlen`` bytes, which contain a
+series of *delta*s, densely packed (no separators). These deltas describe a
+diff from an existing entry (either one that the recipient already has, or one
+previously specified in the bundle/changegroup). The format is described more
+fully in
+``hg help internals.bdiff``, but briefly::
+
+   +---------------------------------------------------------------+
+   |              |            |            |                      |
+   | start offset | end offset | new length |        content       |
+   |  (4 bytes)   |  (4 bytes) |  (4 bytes) | (<new length> bytes) |
+   |              |            |            |                      |
+   +---------------------------------------------------------------+
+
+Please note that the length field in the delta data does *not* include itself.
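+
+A minimal sketch of walking the packed deltas, assuming ``data`` holds the
+raw *delta data* bytes (``parsedeltas`` is an illustrative helper, not part
+of Mercurial's API)::
+
+    import struct
+
+    def parsedeltas(data):
+        # Yield (start offset, end offset, content) triples.
+        pos = 0
+        while pos < len(data):
+            start, end, newlen = struct.unpack(">iii", data[pos:pos + 12])
+            pos += 12
+            yield start, end, data[pos:pos + newlen]
+            pos += newlen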
+
+In version 1, the delta is always applied against the previous node from
+the changegroup or the first parent if this is the first entry in the
+changegroup.
+
+In version 2 and up, the delta base node is encoded in the entry in the
+changegroup. This allows the delta to be expressed against any parent,
+which can result in smaller deltas and more efficient encoding of data.
+
+The *flags* field holds bitwise flags affecting the processing of revision
+data. The following flags are defined:
+
+32768
+   Censored revision. The revision's fulltext has been replaced by censor
+   metadata. May only occur on file revisions.
+16384
+   Ellipsis revision. Revision hash does not match data (likely due to rewritten
+   parents).
+8192
+   Externally stored. The revision fulltext contains ``key:value`` ``\n``
+   delimited metadata defining an object stored elsewhere. Used by the LFS
+   extension.
+
+For historical reasons, the integer values are identical to revlog version 1
+per-revision storage flags and correspond to bits being set in this 2-byte
+field. Bits were allocated starting from the most-significant bit, hence the
+reverse ordering and allocation of these flags.
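+
+Expressed as bit positions, the flags above are (the constant names follow
+Mercurial's internal ones)::
+
+    # Bits are allocated from the most-significant bit of the 2-byte
+    # field downward; values match revlog version 1 storage flags.
+    REVIDX_ISCENSORED = 1 << 15  # 32768
+    REVIDX_ELLIPSIS = 1 << 14    # 16384
+    REVIDX_EXTSTORED = 1 << 13   # 8192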
+
+Changeset Segment
+=================
+
+The *changeset segment* consists of a single *delta group* holding
+changelog data. The *empty chunk* at the end of the *delta group* denotes
+the boundary to the *manifest segment*.
+
+Manifest Segment
+================
+
+The *manifest segment* consists of a single *delta group* holding manifest
+data. If treemanifests are in use, it contains only the manifest for the
+root directory of the repository. Otherwise, it contains the entire
+manifest data. The *empty chunk* at the end of the *delta group* denotes
+the boundary to the next segment (either the *treemanifests segment* or the
+*filelogs segment*, depending on version and the request options).
+
+Treemanifests Segment
+---------------------
+
+The *treemanifests segment* only exists in changegroup version ``3``, and
+only if the ``treemanifest`` param is part of the bundle2 changegroup part
+(it is not possible to use changegroup version 3 outside of bundle2).
+Aside from the filenames in the *treemanifests segment* containing a
+trailing ``/`` character, it behaves identically to the *filelogs segment*
+(see below). The final sub-segment is followed by an *empty chunk* (logically,
+a sub-segment with filename size 0). This denotes the boundary to the
+*filelogs segment*.
+
+Filelogs Segment
+================
+
+The *filelogs segment* consists of multiple sub-segments, each
+corresponding to an individual file whose data is being described::
+
+   +--------------------------------------------------+
+   |          |          |          |     |           |
+   | filelog0 | filelog1 | filelog2 | ... |    0x0    |
+   |          |          |          |     | (4 bytes) |
+   |          |          |          |     |           |
+   +--------------------------------------------------+
+
+The final filelog sub-segment is followed by an *empty chunk* (logically,
+a sub-segment with filename size 0). This denotes the end of the segment
+and of the overall changegroup.
+
+Each filelog sub-segment consists of the following::
+
+   +------------------------------------------------------+
+   |                 |                      |             |
+   | filename length |       filename       | delta group |
+   |    (4 bytes)    | (<length - 4> bytes) |  (various)  |
+   |                 |                      |             |
+   +------------------------------------------------------+
+
+That is, a *chunk* consisting of the filename (not terminated or padded)
+followed by N chunks constituting the *delta group* for this file. The
+*empty chunk* at the end of each *delta group* denotes the boundary to the
+next filelog sub-segment.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/config.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,109 @@
+All config options used within Mercurial should be registered.
+
+Config Option in Core
+=====================
+
+Config options used by Mercurial core are registered in the
+``mercurial.configitems`` module.
+
+Simple entry
+------------
+
+A registration entry typically looks like::
+
+    coreconfigitem('section', 'option',
+        default=MyDefaultValue,
+    )
+
+Once registered, Mercurial will know that ``section.option`` is a legitimate
+config option and that ``MyDefaultValue`` should be used if no other values are
+defined in configuration files.
+
+Complex default value
+---------------------
+
+If the default provided is a callable, it is called to retrieve the default
+value when accessing the config option. This is useful for default values that
+are mutable like the empty list::
+
+    coreconfigitem('pager', 'ignore',
+        default=list,
+    )
+
+In addition, there are cases where the default is not fixed, but computed from
+other properties. In this case, use the ``dynamicdefault`` object as the value
+for the ``default`` parameter. A default value is then explicitly required when
+reading the option::
+
+    # registration
+    coreconfigitem('web', 'name',
+        default=dynamicdefault,
+    )
+
+    # usage
+    ui.config('web', 'name', dirname)
+
+Free form options
+-----------------
+
+Some config sections use free form options (e.g. ``paths``). You can register
+them using the ``generic`` parameter::
+
+    coreconfigitem('paths', '.*',
+        default=None,
+        generic=True,
+    )
+
+When ``generic=True`` is set, the option name is matched as a regular expression
+(rooted to string start). It can be used to select specific sub-parameters::
+
+    coreconfigitem('merge-tools', br'.*\.args$',
+        default="$local $base $other",
+        generic=True,
+        priority=-1,
+    )
+
+The ``priority`` parameter controls the order used to match the generic pattern
+(lower first).
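+
+For instance, a more specific pattern can be tried before the catch-all above
+by giving it a lower priority value (the tool name here is illustrative)::
+
+    coreconfigitem('merge-tools', br'mytool\.args$',
+        default="$local $other",
+        generic=True,
+        priority=-2,
+    )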
+
+Config Option in Extensions
+===========================
+
+General case
+------------
+
+Extensions should register config items through the ``registrar`` API (also used
+for commands and others)::
+
+    configtable = {}
+    configitem = registrar.configitem(configtable)
+
+    configitem('blackbox', 'dirty',
+        default=False,
+    )
+
+The ``dynamicdefault`` object is then available as
+``configitem.dynamicdefault``.
+
+Supporting older versions
+-------------------------
+
+The registrar was introduced in Mercurial 4.3, and the ``generic`` parameter was
+introduced in 4.4. Starting with Mercurial 4.4, all core options were registered
+and developer warnings are emitted when accessing unregistered options.
+
+Extensions supporting versions older than Mercurial 4.3 cannot rely on the
+default value being registered. The simplest way to register an option while
+still supporting an older version is to use ``dynamicdefault`` for options
+requiring a default value. The existing code passing an explicit default can
+then stay in use until compatibility with Mercurial 4.2 is dropped.
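+
+A sketch of that pattern, reusing the ``configitem`` registered as above (the
+``myext`` section and ``limit`` option are illustrative)::
+
+    configitem('myext', 'limit',
+        default=configitem.dynamicdefault,
+    )
+
+    # Call sites keep passing the default explicitly, which also works
+    # on Mercurial 4.2 and earlier:
+    limit = ui.configint('myext', 'limit', 10)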
+
+As a reminder, here are the default values for each config type:
+
+    - config:      None
+    - configbool:  False
+    - configbytes: 0
+    - configdate:  None
+    - configint:   None
+    - configlist:  []
+    - configpath:  None
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/extensions.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,367 @@
+Extensions allow the creation of new features and make them usable directly
+from the main hg command line as if they were built-in commands. Extensions
+have full access to the *internal* API.
+
+Use of Mercurial's internal API very likely makes your code subject to
+Mercurial's license. Before going any further, read the License page.
+
+There are NO guarantees that third-party code calling into Mercurial's
+internals won't break from release to release. If you do use Mercurial's API
+for published third-party code, we expect you to test your code before each
+major Mercurial release. This will prevent various bug reports from your users
+when they upgrade their copy of Mercurial.
+
+File Layout
+===========
+
+Extensions are usually written as simple python modules. Larger ones are
+better split into multiple modules of a single package (see the convert
+extension). The package root module gives its name to the extension and
+implements the ``cmdtable`` and optional callbacks described below.
+
+Command table
+=============
+
+To write your own extension, your Python module can provide an optional dict
+named ``cmdtable`` with entries describing each command. A command should be
+registered in the ``cmdtable`` with the ``@command`` decorator.
+
+Example using ``@command`` decorator (requires Mercurial 1.9)::
+
+    from mercurial.i18n import _
+
+    cmdtable = {}
+    try:
+        from mercurial import registrar
+        command = registrar.command(cmdtable)
+    except (AttributeError, ImportError):
+        # Fallback to hg < 4.3 support
+        from mercurial import cmdutil
+        command = cmdutil.command(cmdtable)
+
+    @command('print-parents',
+        [('s', 'short', None, _('print short form')),
+         ('l', 'long', None, _('print long form'))],
+        _('[options] node'))
+    def printparents(ui, repo, node, **opts):
+        ...
+
+The cmdtable dictionary
+-----------------------
+
+The ``cmdtable`` dictionary uses the new command names as keys and, as values,
+tuples containing:
+
+1. the function to be called when the command is used.
+2. a list of options the command can take.
+3. a command line synopsis for the command (the function docstring is used for
+   the full help).
+
+List of options
+---------------
+
+All the command flag options are documented in the mercurial/fancyopts.py
+sources.
+
+The options list is a list of tuples containing:
+
+1. the short option letter, or ``''`` if no short option is available
+   (for example, ``o`` for a ``-o`` option).
+2. the long option name (for example, ``option`` for a ``--option`` option).
+3. a default value for the option.
+4. a help string for the option (the "hg newcommand" part can be omitted;
+   only the options and parameters substring is needed).
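+
+For instance, the ``--short`` flag from the ``@command`` example above is
+described by the tuple::
+
+    ('s', 'short', None, _('print short form'))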
+
+Command function signatures
+---------------------------
+
+Functions that implement new commands always receive a ``ui`` and usually
+a ``repo`` parameter. The rest of the parameters are taken from the command
+line items that don't start with a dash and are passed in the same order they
+were written. If no default value is given in the parameter list, they are
+required.
+
+If there is no repo to be associated with the command and consequently no
+``repo`` passed, then ``norepo=True`` should be passed to the ``@command``
+decorator::
+
+    @command('mycommand', [], norepo=True)
+    def mycommand(ui, **opts):
+        ...
+
+For examples of ``norepo``, see the convert extension.
+
+Command function docstrings
+===========================
+
+The docstring of your function is used as the main help text, shown by
+``hg help mycommand``. The docstring should be formatted using a simple
+subset of reStructuredText markup. The supported constructs include:
+
+Paragraphs::
+
+    This is a paragraph.
+
+    Paragraphs are separated
+    by blank lines.
+
+A verbatim block is introduced with a double colon followed by an indented
+block. The double colon is turned into a single colon on display::
+
+    Some text::
+
+      verbatim
+        text
+         !!
+
+We have field lists::
+
+    :key1: value1
+    :key2: value2
+
+Bullet lists::
+
+    - foo
+    - bar
+
+Enumerated lists::
+
+    1. foo
+    2. bar
+
+Inline markup::
+
+    ``*bold*``, ``monospace``, :hg:`command`
+
+Mark Mercurial commands with ``:hg:`` to make a nice link to the corresponding
+documentation. We'll expand the support if new constructs can be parsed
+without too much trouble.
+
+Communicating with the user
+===========================
+
+Besides the ``ui`` methods, like ``ui.write(*msg)`` or
+``ui.prompt(msg, default="y")``, an extension can add help text for each
+of its commands and the extension itself.
+
+The module docstring will be used as the help string when ``hg help extensionname``
+is used and, similarly, the help string for a command and the docstring
+belonging to the function that's wrapped by the command will be shown when
+``hg help command`` is invoked.
+
+Setup Callbacks
+===============
+
+Extensions are loaded in phases. All extensions are processed in a given phase
+before the next phase begins. In the first phase, all extension modules are
+loaded and registered with Mercurial. This means that you can find all enabled
+extensions with ``extensions.find`` in the following phases.
+
+Extension setup
+---------------
+
+There are two callbacks to be called when extensions are loaded, named
+``uisetup`` and ``extsetup``. ``uisetup`` is called first for each extension,
+then ``extsetup`` is called. This means ``extsetup`` can be useful in case
+one extension optionally depends on another extension.
+
+Both ``uisetup`` and ``extsetup`` receive a ui object with the local
+repository configuration::
+
+    def uisetup(ui):
+        # ...
+
+    def extsetup(ui):
+        # ...
+
+Be aware that ``uisetup`` is NOT the function to configure a ``ui`` instance.
+It's called only once per process, not per ``ui`` instance. Also, any changes
+to the ``ui`` may be discarded because the ``ui`` here has only temporarily
+loaded the local configuration. So, it's generally wrong to do
+``ui.setconfig()`` in these callbacks. Notable exceptions are setting
+``pre/post-<command>`` hooks and extending ``ui.__class__``.
+
+In Mercurial 1.3.1 or earlier, ``extsetup`` takes no argument.
+
+Command table setup
+-------------------
+
+After ``extsetup``, the ``cmdtable`` is copied into the global command table
+in Mercurial.
+
+Ui instance setup
+-----------------
+
+The optional ``uipopulate`` is called for each ``ui`` instance after
+configuration is loaded, where extensions can set up additional ui members,
+update configuration by ``ui.setconfig()``, and extend the class dynamically.
+
+Typically there are three ``ui`` instances involved in command execution:
+
+``req.ui`` (or ``repo.baseui``)
+    Only system and user configurations are loaded into it.
+``lui``
+    Local repository configuration is loaded as well. This will be used at
+    early dispatching stage where a repository isn't available.
+``repo.ui``
+    The fully-loaded ``ui`` used after a repository is instantiated. This
+    will be created from the ``req.ui`` per repository.
+
+In command server and hgweb, this may be called more than once for the same
+``ui`` instance.
+
+(New in Mercurial 4.9)
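+
+A minimal sketch of such a callback (the ``myext`` section and ``style``
+option are illustrative)::
+
+    def uipopulate(ui):
+        # Called once per ui instance, after its configuration is loaded.
+        if not ui.config('myext', 'style'):
+            ui.setconfig('myext', 'style', 'default', 'myext')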
+
+Repository setup
+----------------
+
+Extensions can implement an optional callback named ``reposetup``. It is
+called after the main Mercurial repository initialization, and can be used
+to set up any local state the extension might need.
+
+Like other command functions, it receives a ``ui`` object and a ``repo``
+object (no additional parameters, though)::
+
+    def reposetup(ui, repo):
+        # do initialization here.
+
+It is important to take into account that the ``ui`` object that is received
+by the ``reposetup`` function is not the same as the one received by the
+``uisetup`` and ``extsetup`` functions. This is particularly important when
+setting up hooks as described in the following section, since not all hooks
+use the same ``ui`` object and hence different hooks must be configured in
+different setup functions.
+
+Wrapping methods on the ui and repo classes
+-------------------------------------------
+
+Because extensions can be loaded *per repository*, you should avoid using
+``extensions.wrapfunction()`` on methods of the ``ui`` and ``repo`` objects.
+Instead, create a subclass of the specific class of the instance passed into
+the ``*setup()`` hook; e.g. use ``ui.__class__`` as the base class, then
+reassign your new class to ``ui.__class__`` again. Mercurial will then use
+your updated ``ui`` or ``repo`` instance only for repositories where your
+extension is enabled (or copies thereof, reusing your new class).
+
+For example::
+
+    def uisetup(ui):
+        class echologui(ui.__class__):
+            def log(self, service, *msg, **opts):
+                if msg:
+                    self.write('%s: %s\n' % (service, msg[0] % msg[1:]))
+                super(echologui, self).log(service, *msg, **opts)
+
+        ui.__class__ = echologui
+
+Configuring Hooks
+=================
+
+Some extensions must use hooks to do their work. These required hooks can
+be configured manually by the user by modifying the ``[hook]`` section of
+their hgrc, but they can also be configured automatically by calling the
+``ui.setconfig('hooks', ...)`` function in one of the setup functions
+described above.
+
+The main difference between manually modifying the hooks section in the hgrc
+and using ``ui.setconfig()`` is that when using ``ui.setconfig()`` you have
+access to the actual hook function object, which you can pass directly to
+``ui.setconfig()``, while when you use the hooks section of the hgrc file
+you must refer to the hook function by using the
+``python:modulename.functionname`` idiom (e.g. ``python:hgext.notify.hook``).
+
+For example::
+
+    # Define hooks -- note that the actual function name is irrelevant.
+    def preupdatehook(ui, repo, **kwargs):
+        ui.write("Pre-update hook triggered\n")
+
+    def updatehook(ui, repo, **kwargs):
+        ui.write("Update hook triggered\n")
+
+    def uisetup(ui):
+        # When pre-<cmd> and post-<cmd> hooks are configured by means of
+        # the ui.setconfig() function, you must use the ui object passed
+        # to uisetup or extsetup.
+        ui.setconfig("hooks", "pre-update.myextension", preupdatehook)
+
+    def reposetup(ui, repo):
+        # Repository-specific hooks can be configured here. These include
+        # the update hook.
+        ui.setconfig("hooks", "update.myextension", updatehook)
+
+Note how different hooks may need to be configured in different setup
+functions. In the example you can see that the ``update`` hook must be
+configured in the ``reposetup`` function, while the ``pre-update`` hook
+must be configured in the ``uisetup`` or ``extsetup`` functions.
+
+Marking compatible versions
+===========================
+
+Every extension should use the ``testedwith`` variable to specify Mercurial
+releases it's known to be compatible with. This helps us and users diagnose
+where problems are coming from::
+
+    testedwith = '2.0 2.0.1 2.1 2.1.1 2.1.2'
+
+Do not use the ``internal`` marker in third-party extensions; we will
+immediately drop all bug reports mentioning your extension if we catch you
+doing this.
+
+Similarly, an extension can use the ``buglink`` variable to specify how users
+should report issues with the extension.  This link will be included in the
+error message if the extension produces errors::
+
+    buglink = 'https://bitbucket.org/USER/REPO/issues'
+
+If an extension requires a minimum version of Mercurial, it can be declared
+with the ``minimumhgversion`` variable::
+
+    minimumhgversion = '4.6'
+
+Older clients will print a warning that the extension requires a new version,
+instead of attempting to load it.
+
+Wrap up: what belongs where?
+============================
+
+Here you will find a list of the most common tasks, based on setups from the
+extensions included in Mercurial core.
+
+uisetup
+-------
+
+* Changes to ``ui.__class__``. The ``ui`` object that will be used to run
+  the command has not yet been created. Changes made here will affect ``ui``
+  objects created after this, and in particular the ``ui`` that will be passed
+  to ``runcommand``.
+* Command wraps (``extensions.wrapcommand``)
+* Changes that need to be visible to other extensions: because initialization
+  occurs in phases (all extensions run ``uisetup``, then all run ``extsetup``),
+  a change made here will be visible to other extensions during ``extsetup``.
+* Monkeypatches or function wraps (``extensions.wrapfunction``) of ``dispatch``
+  module members
+* Set up ``pre-*`` and ``post-*`` hooks. (DEPRECATED. ``uipopulate`` is
+  preferred on Mercurial 4.9 and later.)
+* ``pushkey`` setup
+
+extsetup
+--------
+
+* Changes depending on the status of other extensions. (``if extensions.find('mq')``)
+* Add a global option to all commands
+* Extend revsets
+
+uipopulate
+----------
+
+* Modify ``ui`` instance attributes and configuration variables.
+* Changes to ``ui.__class__`` per instance.
+* Set up all hooks per scoped configuration.
+
+reposetup
+---------
+
+* Set up all hooks but ``pre-*`` and ``post-*``. (DEPRECATED. ``uipopulate`` is
+  preferred on Mercurial 4.9 and later.)
+* Modify configuration variables
+* Changes to ``repo.__class__``, ``repo.dirstate.__class__``
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/linelog.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,302 @@
+linelog is a storage format inspired by the "Interleaved deltas" idea. See
+https://en.wikipedia.org/wiki/Interleaved_deltas for an introduction.
+
+0. SCCS Weave
+
+  To understand what linelog is, we first take a quick look at a simplified
+  SCCS weave format (with the header removed), which is an implementation of
+  the "Interleaved deltas" idea.
+
+0.1 Basic SCCS Weave File Format
+
+  An SCCS weave file consists of plain text lines. Each line is either a
+  special instruction starting with "^A" or part of the content of the real
+  file the weave tracks. There are 3 important operations, where REV denotes
+  the revision number:
+
+    ^AI REV, marking the beginning of an insertion block introduced by REV
+    ^AD REV, marking the beginning of a deletion block introduced by REV
+    ^AE REV, marking the end of the block started by "^AI REV" or "^AD REV"
+
+  Note on revision numbers: For any two different revision numbers, one must
+  be an ancestor of the other to make them comparable. This enforces linear
+  history. Additionally, the comparison functions (">=", "<") should be
+  efficient. This means that if revisions are hashes, as in Git or Mercurial,
+  an external map is required to convert them into integers.
+
+  For example, to represent the following changes:
+
+    REV 1 | REV 2 | REV 3
+    ------+-------+-------
+    a     | a     | a
+    b     | b     | 2
+    c     | 1     | c
+          | 2     |
+          | c     |
+
+  A possible weave file looks like:
+
+    ^AI 1
+    a
+    ^AD 3
+    b
+    ^AI 2
+    1
+    ^AE 3
+    2
+    ^AE 2
+    c
+    ^AE 1
+
+  An "^AE" does not always match its nearest operation ("^AI" or "^AD"). In
+  the above example, "^AE 3" does not match the nearest "^AI 2" but "^AD 3".
+  Therefore we need some extra information for "^AE". The SCCS weave uses a
+  revision number. It could also be a boolean value about whether it is an
+  insertion or a deletion (see section 0.4).
+
+0.2 Checkout
+
+  The "checkout" operation is to retrieve file content at a given revision,
+  say X. It's doable by going through the file line by line and:
+
+    - If we meet ^AI rev, and rev > X, find the corresponding ^AE and jump there
+    - If we meet ^AD rev, and rev <= X, find the corresponding ^AE and jump there
+    - Ignore ^AE
+    - For normal lines, just output them
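+
+  A minimal sketch in Python, assuming the weave has been parsed into a list
+  where control lines are (op, rev) tuples with op in 'I', 'D', 'E' and
+  content lines are plain strings (the parsing itself is omitted):
+
+    def checkout(weave, x):
+        out = []
+        skipto = None  # rev whose ^AE closes the block being skipped
+        for item in weave:
+            if isinstance(item, tuple):
+                op, rev = item
+                if skipto is not None:
+                    if op == 'E' and rev == skipto:
+                        skipto = None  # reached the corresponding ^AE
+                elif (op == 'I' and rev > x) or (op == 'D' and rev <= x):
+                    skipto = rev
+            elif skipto is None:
+                out.append(item)
+        return out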
+
+0.3 Annotate
+
+  The "annotate" operation is to show extra metadata like the revision number
+  and the original line number a line comes from.
+
+  It's basically just a "Checkout". The extra metadata can be stored side by
+  side with the line contents. Alternatively, we can infer the revision number
+  from "^AI"s.
+
+  Some SCM tools have to calculate diffs on the fly and thus are much slower
+  on this operation.
+
+0.4 Tree Structure
+
+  The word "interleaved" is used because "^AI" .. "^AE" and "^AD" .. "^AE"
+  blocks can be interleaved.
+
+  If we consider insertions and deletions separately, they can form tree
+  structures, respectively.
+
+    +--- ^AI 1        +--- ^AD 3
+    | +- ^AI 2        | +- ^AD 2
+    | |               | |
+    | +- ^AE 2        | +- ^AE 2
+    |                 |
+    +--- ^AE 1        +--- ^AE 3
+
+  More specifically, it's possible to build a tree for all insertions, where
+  the tree node has the structure "(rev, startline, endline)". "startline" is
+  the line number of "^AI" and "endline" is the line number of the matched
+  "^AE".  The tree will have these properties:
+
+    1. child.rev > parent.rev
+    2. child.startline > parent.startline
+    3. child.endline < parent.endline
+
+  A similar tree for all deletions can also be built with the first property
+  changed to:
+
+    1. child.rev < parent.rev
+
+0.5 Malformed Cases
+
+  The following cases are considered malformed in our implementation:
+
+    1. Interleaved insertions, or interleaved deletions.
+       It can be rewritten to a non-interleaved tree structure.
+
+       Take insertions as example, deletions are similar:
+
+       ^AI x         ^AI x
+       a             a
+       ^AI x + 1  -> ^AI x + 1
+       b             b
+       ^AE x         ^AE x + 1
+       c             ^AE x
+       ^AE x + 1     ^AI x + 1
+                     c
+                     ^AE x + 1
+
+    2. Nested insertions, where the inner one has a smaller revision number.
+       Or nested deletions, where the inner one has a larger revision number.
+       It can be rewritten to a non-nested form.
+
+       Take insertions as example, deletions are similar:
+
+       ^AI x + 1     ^AI x + 1
+       a             a
+       ^AI x      -> ^AE x + 1
+       b             ^AI x
+       ^AE x         b
+       c             ^AE x
+       ^AE x + 1     ^AI x + 1
+                     c
+                     ^AE x + 1
+
+    3. Insertion inside deletion with a smaller revision number.
+
+       Rewrite by duplicating the content inserted:
+
+       ^AD x          ^AD x
+       a              a
+       ^AI x + 1  ->  b
+       b              c
+       ^AE x + 1      ^AE x
+       c              ^AI x + 1
+       ^AE x          b
+                      ^AE x + 1
+
+       Note: If "annotate" purely depends on "^AI" information, then the
+       duplication content will lose track of where "b" is originally from.
+
+  Some of them may be valid in other implementations for special purposes. For
+  example, to "revive" a previously deleted block in a newer revision.
+
+0.6 Cases Can Be Optimized
+
+  It's always better to get things nested. For example, the left is more
+  efficient than the right while they represent the same content:
+
+    +--- ^AD 2          +- ^AD 1
+    | +- ^AD 1          |   LINE A
+    | |   LINE A        +- ^AE 1
+    | +- ^AE 1          +- ^AD 2
+    |     LINE B        |   LINE B
+    +--- ^AE 2          +- ^AE 2
+
+  Our implementation sometimes generates the less efficient form. Always
+  producing the optimal form would require extra code complexity that seems
+  unjustified.
+
+0.7 Inefficiency
+
+  The file format can be slow because:
+
+  - Inserting a new line at position P requires rewriting all data after P.
+  - Finding "^AE" requires walking through the content (O(N), where N is the
+    number of lines between "^AI/D" and "^AE").
+
+1. Linelog
+
+  The linelog is a binary format dedicated to speeding up Mercurial's (or
+  Git's) "annotate" operation. It's designed to avoid issues mentioned in
+  section 0.7.
+
+1.1 Content Stored
+
+  Linelog is not another storage for file contents. It only stores line
+  numbers and corresponding revision numbers, instead of actual line content.
+  This is okay for the "annotate" operation because the external source is
+  usually fast at checking out the content of a file at a specific revision.
+
+  A typical SCCS weave is also fast on the "grep" operation, which needs
+  random accesses to line contents from different revisions of a file. This
+  can be slow with linelog's no-line-content design. However, we could use
+  an extra map ((rev, line num) -> line content) to speed it up.
+
+  Note that the revision numbers in linelog should be independent of
+  Mercurial's integer revision numbers. There should be some mapping between
+  the linelog rev and the hg hash stored side by side, to make the files
+  reusable after being
+  copied to another machine.
+
+1.2 Basic Format
+
+  A linelog file consists of "instruction"s. An "instruction" can be either:
+
+    - JGE  REV ADDR     # jump to ADDR if rev >= REV
+    - JL   REV ADDR     # jump to ADDR if rev < REV
+    - LINE REV LINENUM  # append the (LINENUM+1)-th line in revision REV
+
+  For example, here is the linelog representing the same file with the
+  3 revisions mentioned in section 0.1:
+
+    SCCS  |    Linelog
+    Weave | Addr : Instruction
+    ------+------+-------------
+    ^AI 1 |    0 : JL   1 8
+    a     |    1 : LINE 1 0
+    ^AD 3 |    2 : JGE  3 6
+    b     |    3 : LINE 1 1
+    ^AI 2 |    4 : JL   2 7
+    1     |    5 : LINE 2 2
+    ^AE 3 |
+    2     |    6 : LINE 2 3
+    ^AE 2 |
+    c     |    7 : LINE 1 2
+    ^AE 1 |
+          |    8 : END
+
+  This way, "find ^AE" is O(1) because we just jump there. And we can insert
+  new lines without rewriting most part of the file by appending new lines and
+  changing a single instruction to jump to them.
+
+  The current implementation uses 64 bits for an instruction: The opcode (JGE,
+  JL or LINE) takes 2 bits, REV takes 30 bits and ADDR or LINENUM takes 32
+  bits. It also stores the max revision number and buffer size at the first
+  64 bits for quick access to these values.
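+
+  A sketch of decoding one instruction under that layout (placing the opcode
+  in the low two bits is an assumption for illustration; the real encoding
+  may differ):
+
+    import struct
+
+    def decodeone(buf, addr):
+        # Each instruction is 8 bytes: two big-endian 32-bit words.
+        word1, operand = struct.unpack('>II', buf[addr * 8:addr * 8 + 8])
+        opcode = word1 & 0b11  # JGE / JL / LINE
+        rev = word1 >> 2       # 30-bit revision number
+        return opcode, rev, operand  # operand is ADDR or LINENUM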
+
+1.3 Comparing with Mercurial's revlog format
+
+  Apparently, linelog is very different from revlog: linelog stores rev and
+  line numbers, while revlog has line contents and other metadata (like
+  parents, flags). However, the revlog format could also be used to store rev
+  and line numbers. For example, to speed up the annotate operation, we could
+  also pre-calculate annotate results and just store them using the revlog
+  format.
+
+  Therefore, linelog is actually somewhat similar to revlog, with the important
+  trade-off that it only supports linear history (mentioned in section 0.1).
+  Essentially, the differences are:
+
+    a) Linelog is full of deltas, while revlog could contain full file
+       contents sometimes. So linelog is smaller. Revlog could trade
+       reconstruction speed for file size - best case, revlog is as small as
+       linelog.
+    b) The interleaved delta structure allows skipping large portions of
+       uninteresting deltas so linelog's content reconstruction is faster than
+       the delta-only version of revlog (however it's possible to construct
+       a case where interleaved deltas degrade to plain deltas, so linelog
+       worst case would be delta-only revlog). Revlog could trade file size
+       for reconstruction speed.
+    c) Linelog implicitly maintains the order of all lines it stores. So it
+       could dump all the lines from all revisions, with a reasonable order.
+       While revlog could also dump all line additions, it requires extra
+       computation to figure out the order in which to put those lines -
+       that's some kind of "merge".
+
+  "c" makes "hg absorb" easier to implement and makes it possible to do
+  "annotate --deleted".
+
+1.4 Malformed Cases Handling
+
+  The following "case 1", "case 2", and "case 3" refer to cases mentioned
+  in section 0.5.
+
+  Using the exposed API (replacelines), case 1 is impossible to generate,
+  although it's possible to generate it by constructing rawdata and loading
+  that via linelog.fromdata.
+
+  Doing annotate(maxrev) before replacelines (aka. a1, a2 passed to
+  replacelines are related to the latest revision) eliminates the possibility
+  of case 3. That makes sense since usually you'd like to make edits on top of
+  the latest revision. Practically, both absorb and fastannotate do this.
+
+  Doing annotate(maxrev), plus replacelines(rev, ...) where rev >= maxrev
+  eliminates the possibility of case 2. That makes sense since usually the
+  edits belong to "new revisions", not "old revisions". Practically,
+  fastannotate does this. Absorb calls replacelines with rev < maxrev to edit
+  past revisions. So it needs some extra care to not generate case 2.
+
+  If case 1 occurs, that probably means linelog file corruption (assuming
+  linelog is edited via public APIs). The checkout or annotate result could
+  be less meaningful or even error out, but linelog wouldn't enter an infinite
+  loop.
+
+  If either case 2 or 3 occurs, linelog works as if the inner "^AI/D" and "^AE"
+  operations on the left side are silently ignored.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/mergestate.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,80 @@
+The active mergestate is stored in ``.hg/merge`` when a merge is triggered
+by commands like ``hg merge``, ``hg rebase``, etc. It tracks the 3-way merge
+state of individual files until the merge is completed or aborted.
+
+The contents of the directory are:
+
+Conflicting files
+-----------------
+
+The local versions of the conflicting files are stored under
+filenames that are the hashes of their paths.
+
+state
+-----
+
+This mergestate file record is used by Mercurial versions prior to 2.9.1
+and contains less data than ``state2``. If there is no contradiction
+with ``state2``, we can assume that both are written at the same time.
+In this case, data from ``state2`` is used. Otherwise, we use ``state``.
+We read/write both ``state`` and ``state2`` records to ensure backward
+compatibility.
+
+state2
+------
+
+This record stores a superset of data in ``state``, including new kinds
+of records in the future.
+
+Each record can contain arbitrary content and has an associated type. This
+``type`` should be a letter. If ``type`` is uppercase, the record is mandatory:
+versions of Mercurial that don't support it should abort. If ``type`` is
+lowercase, the record can be safely ignored.
+
+Currently known records:
+
+| * L: the node of the "local" part of the merge (hexified version)
+| * O: the node of the "other" part of the merge (hexified version)
+| * F: a file to be merged entry
+| * C: a change/delete or delete/change conflict
+| * D: a file that the external merge driver will merge internally
+|      (experimental)
+| * P: a path conflict (file vs directory)
+| * m: the external merge driver defined for this merge plus its run state
+|      (experimental)
+| * f: a (filename, dictionary) tuple of optional values for a given file
+| * X: unsupported mandatory record type (used in tests)
+| * x: unsupported advisory record type (used in tests)
+| * l: the labels for the parts of the merge.
+
+Merge driver run states (experimental):
+
+| * u: driver-resolved files unmarked -- needs to be run next time we're
+|      about to resolve or commit
+| * m: driver-resolved files marked -- only needs to be run before commit
+| * s: success/skipped -- does not need to be run any more
+
+Merge record states (indexed by filename):
+
+| * u: unresolved conflict
+| * r: resolved conflict
+| * pu: unresolved path conflict (file conflicts with directory)
+| * pr: resolved path conflict
+| * d: driver-resolved conflict
+
+The resolve command transitions between 'u' and 'r' for conflicts and
+'pu' and 'pr' for path conflicts.
+
+This format is a list of arbitrary records of the form:
+
+[type][length][content]
+
+``type`` is a single character, ``length`` is a 4 byte integer, and
+``content`` is an arbitrary byte sequence of length ``length``.
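+
+A minimal sketch of walking such records (the helper name is illustrative)::
+
+    import struct
+
+    def iterrecords(data):
+        pos = 0
+        while pos < len(data):
+            rtype = data[pos:pos + 1]
+            length = struct.unpack('>I', data[pos + 1:pos + 5])[0]
+            yield rtype, data[pos + 5:pos + 5 + length]
+            pos += 5 + length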
+
+Mercurial versions prior to 3.7 have a bug where if there are
+unsupported mandatory merge records, attempting to clear out the merge
+state with hg update --clean or similar aborts. The 't' record type
+works around that by writing out what those versions treat as an
+advisory record, but later versions interpret as special: the first
+character is the 'real' record type and everything onwards is the data.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/requirements.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,144 @@
+Repositories contain a file (``.hg/requires``) containing a list of
+features/capabilities that are *required* for clients to interface
+with the repository. This file has been present in Mercurial since
+version 0.9.2 (released December 2006).
+
+One of the first things clients do when opening a repository is read
+``.hg/requires`` and verify that all listed requirements are supported,
+aborting if not. Requirements are therefore a strong mechanism to
+prevent incompatible clients from reading from unknown repository
+formats or even corrupting them by writing to them.
+
+Extensions may add requirements. When they do this, clients not running
+an extension will be unable to read from repositories.
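+
+The file itself holds one requirement name per line. A minimal sketch of the
+check a client performs, with an illustrative set of supported names::
+
+    SUPPORTED = {'revlogv1', 'store', 'fncache', 'dotencode'}
+
+    with open('.hg/requires') as fh:
+        requirements = set(fh.read().splitlines())
+    missing = requirements - SUPPORTED
+    if missing:
+        raise SystemExit('repository requires features unknown to this '
+                         'client: %s' % ', '.join(sorted(missing)))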
+
+The following sections describe the requirements defined by the
+Mercurial core distribution.
+
+revlogv1
+========
+
+When present, revlogs are version 1 (RevlogNG). RevlogNG was introduced
+in 2006. The ``revlogv1`` requirement has been enabled by default
+since the ``requires`` file was introduced in Mercurial 0.9.2.
+
+If this requirement is not present, version 0 revlogs are assumed.
+
+store
+=====
+
+The *store* repository layout should be used.
+
+This requirement has been enabled by default since the ``requires`` file
+was introduced in Mercurial 0.9.2.
+
+fncache
+=======
+
+The *fncache* repository layout should be used.
+
+The *fncache* layout hash-encodes filenames with long paths and
+encodes reserved filenames.
+
+This requirement is enabled by default when the *store* requirement is
+enabled (which is the default behavior). It was introduced in Mercurial
+1.1 (released December 2008).
+
+shared
+======
+
+Denotes that the store for a repository is shared from another location
+(defined by the ``.hg/sharedpath`` file).
+
+This requirement is set when a repository is created via :hg:`share`.
+
+The requirement was added in Mercurial 1.3 (released July 2009).
+
+relshared
+=========
+
+Derivative of ``shared``; the location of the store is relative to the
+store of this repository.
+
+This requirement is set when a repository is created via :hg:`share`
+using the ``--relative`` option.
+
+The requirement was added in Mercurial 4.2 (released May 2017).
+
+dotencode
+=========
+
+The *dotencode* repository layout should be used.
+
+The *dotencode* layout encodes the first period or space in filenames
+to prevent issues on OS X and Windows.
+
+This requirement is enabled by default when the *store* requirement
+is enabled (which is the default behavior). It was introduced in
+Mercurial 1.7 (released November 2010).
+
+parentdelta
+===========
+
+Denotes a revlog delta encoding format that was experimental and
+replaced by *generaldelta*. It should not be seen in the wild because
+it was never enabled by default.
+
+This requirement was added in Mercurial 1.7 and removed in Mercurial
+1.9.
+
+generaldelta
+============
+
+Revlogs should be created with the *generaldelta* flag enabled. The
+generaldelta flag will cause deltas to be encoded against a parent
+revision instead of the previous revision in the revlog.
+
+Support for this requirement was added in Mercurial 1.9 (released
+July 2011). The requirement was disabled on new repositories by
+default until Mercurial 3.7 (released February 2016).
+
+manifestv2
+==========
+
+Denotes that version 2 of manifests is being used.
+
+Support for this requirement was added in Mercurial 3.4 (released
+May 2015). The new format failed to meet expectations and support
+for the format and requirement was removed in Mercurial 4.6
+(released May 2018) since the feature never graduated from experimental
+status.
+
+treemanifest
+============
+
+Denotes that tree manifests are being used. Tree manifests are
+one manifest per directory (as opposed to a single flat manifest).
+
+Support for this requirement was added in Mercurial 3.4 (released
+May 2015). The requirement is currently experimental and is
+disabled by default.
+
+exp-sparse
+==========
+
+The working directory is sparse (only contains a subset of files).
+
+Support for this requirement was added in Mercurial 4.3 (released
+August 2017). This requirement and feature are experimental and may
+disappear in a future Mercurial release. The requirement will only
+be present on repositories that have opted in to a sparse working
+directory.
+
+bookmarksinstore
+================
+
+Bookmarks are stored in ``.hg/store/`` instead of directly in ``.hg/``
+where they used to be stored. The active bookmark is still stored
+directly in ``.hg/``. This makes them always shared by ``hg share``,
+whether or not ``-B`` was passed.
+
+Support for this requirement was added in Mercurial 5.1 (released
+August 2019). The requirement will only be present on repositories
+that have opted in to this format (by having
+``format.bookmarks-in-store=true`` set when they were created).
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/revlogs.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,239 @@
+Revision logs - or *revlogs* - are an append only data structure for
+storing discrete entries, or *revisions*. They are the primary storage
+mechanism of repository data.
+
+Revlogs effectively model a directed acyclic graph (DAG). Each node
+has edges to 1 or 2 *parent* nodes. Each node contains metadata and
+the raw value for that node.
+
+Revlogs consist of entries which have metadata and revision data.
+Metadata includes the hash of the revision's content, sizes, and
+links to its *parent* entries. The collective metadata is referred
+to as the *index* and the revision data is the *data*.
+
+Revision data is stored as a series of compressed deltas against
+ancestor revisions.
+
+Revlogs are written in an append-only fashion. We never need to rewrite
+a file to insert data, nor do we need to remove data. Rolling back in-progress
+writes can be performed by truncating files. Read locks can be avoided
+using simple techniques. This means that references to other data in
+the same revlog *always* refer to a previous entry.
+
+Revlogs can be modeled as 0-indexed arrays. The first revision is
+revision #0 and the second is revision #1. The revision -1 is typically
+used to mean *does not exist* or *not defined*.
+
+File Format
+===========
+
+A revlog begins with a 32-bit big endian integer holding version info
+and feature flags. This integer overlaps with the first four bytes of
+the first revision entry.
+
+This integer is logically divided into 2 16-bit shorts. The least
+significant half of the integer is the format/version short. The other
+short holds feature flags that dictate behavior of the revlog.
+
+The following values for the format/version short are defined:
+
+0
+   The original revlog version.
+1
+   RevlogNG (*next generation*). It replaced version 0 when it was
+   implemented in 2006.
+2
+   In-development version incorporating accumulated knowledge and
+   missing features from 10+ years of revlog version 1.
+57005 (0xdead)
+   Reserved for internal testing of new versions. No defined format
+   beyond 32-bit header.
+
+The feature flags short consists of bit flags, where 0 is the least
+significant bit. The bit flags vary by revlog version.
+
+Version 0 revlogs have no defined flags and the presence of a flag
+is considered an error.
+
+Version 1 revlogs have the following flags at the specified bit offsets:
+
+0
+   Store revision data inline.
+1
+   Generaldelta encoding.
+
+Version 2 revlogs have the following flags at the specified bit offsets:
+
+0
+   Store revision data inline.
+
+The following header values are common:
+
+00 00 00 01
+   v1
+00 01 00 01
+   v1 + inline
+00 02 00 01
+   v1 + generaldelta
+00 03 00 01
+   v1 + inline + generaldelta
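+
+A sketch of splitting the header into its two shorts (the path is
+illustrative)::
+
+    import struct
+
+    with open('.hg/store/00changelog.i', 'rb') as fh:
+        header = struct.unpack('>I', fh.read(4))[0]
+    version = header & 0xFFFF  # least significant short: format/version
+    flags = header >> 16       # feature flags short
+    inline = bool(flags & (1 << 0))
+    generaldelta = bool(flags & (1 << 1))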
+
+Following the 32-bit header is the remaining 60 bytes of the first index
+entry. Following that are additional *index* entries. Inlined revision
+data is possibly located between index entries. More on this inlined
+layout is described below.
+
+Version 1 Format
+================
+
+Version 1 (RevlogNG) begins with an index describing the revisions in
+the revlog. If the ``inline`` flag is set, revision data is stored inline,
+or between index entries (as opposed to in a separate container).
+
+Each index entry is 64 bytes. The byte layout of each entry is as
+follows, with byte 0 being the first byte (all data stored as big endian):
+
+0-3 (4 bytes) (rev 0 only)
+   Revlog header
+
+0-5 (6 bytes)
+   Absolute offset of revision data from beginning of revlog.
+
+6-7 (2 bytes)
+   Bit flags impacting revision behavior. The following bit offsets define:
+
+   0: REVIDX_ISCENSORED revision has censor metadata, must be verified.
+
+   1: REVIDX_ELLIPSIS revision hash does not match its data. Used by
+   narrowhg
+
+   2: REVIDX_EXTSTORED revision data is stored externally.
+
+8-11 (4 bytes)
+   Compressed length of revision data / chunk as stored in revlog.
+
+12-15 (4 bytes)
+   Uncompressed length of revision data. This is the size of the full
+   revision data, not the size of the chunk post decompression.
+
+16-19 (4 bytes)
+   Base or previous revision this revision's delta was produced against.
+   This revision holds full text (as opposed to a delta) if it points to
+   itself. For generaldelta repos, this is the previous revision in the
+   delta chain. For non-generaldelta repos, this is the base or first
+   revision in the delta chain.
+
+20-23 (4 bytes)
+   A revision this revision is *linked* to. This allows a revision in
+   one revlog to be forever associated with a revision in another
+   revlog. For example, a file's revlog may point to the changelog
+   revision that introduced it.
+
+24-27 (4 bytes)
+   Revision of 1st parent. -1 indicates no parent.
+
+28-31 (4 bytes)
+   Revision of 2nd parent. -1 indicates no 2nd parent.
+
+32-63 (32 bytes)
+   Hash of revision's full text. Currently, SHA-1 is used and only
+   the first 20 bytes of this field are used. The rest of the bytes
+   are ignored and should be stored as \0.
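+
+A sketch of unpacking one 64-byte version 1 index entry following the byte
+layout above::
+
+    import struct
+
+    # 8 bytes of offset+flags, six 4-byte ints, 20-byte hash, 12 null bytes.
+    INDEXFMT = '>Qiiiiii20s12x'
+
+    def parseentry(entry):
+        (offsetflags, complen, rawlen, deltabase,
+         linkrev, p1, p2, node) = struct.unpack(INDEXFMT, entry)
+        offset = offsetflags >> 16    # 6-byte absolute offset
+        flags = offsetflags & 0xFFFF  # 2-byte bit flags
+        return (offset, flags, complen, rawlen, deltabase, linkrev, p1, p2,
+                node)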
+
+If inline revision data is being stored, the compressed revision data
+(of length from bytes offset 8-11 from the index entry) immediately
+follows the index entry. There is no header on the revision data. There
+is no padding between it and the index entries before and after.
+
+If revision data is not inline, then raw revision data is stored in a
+separate byte container. The offsets from bytes 0-5 and the compressed
+length from bytes 8-11 define how to access this data.
+
+The 6 byte absolute offset field from the first revlog entry overlaps
+with the revlog header. That is, the first 6 bytes of the first revlog
+entry can be split into four bytes containing the header for the revlog
+file and an additional two bytes containing the offset for the first
+entry. Since this is the offset from the beginning of the file for the
+first revision entry, the two bytes will always be set to zero.
+
+Version 2 Format
+================
+
+(In development. Format not finalized or stable.)
+
+Version 2 is identical to version 1 with the following differences.
+
+There is no dedicated *generaldelta* revlog format flag. Instead,
+the feature is implicitly enabled by default.
+
+Delta Chains
+============
+
+Revision data is encoded as a chain of *chunks*. Each chain begins with
+the compressed original full text for that revision. Each subsequent
+*chunk* is a *delta* against the previous revision. We therefore call
+these chains of chunks/deltas *delta chains*.
+
+The full text for a revision is reconstructed by loading the original
+full text for the base revision of a *delta chain* and then applying
+*deltas* until the target revision is reconstructed.
+
+*Delta chains* are limited in length so lookup time is bounded. They are
+limited to ~2x the length of the revision's data. The linear distance
+between the base chunk and the final chunk is also limited so that the
+amount of read I/O to load all chunks in the delta chain is bounded.
+
+Deltas and delta chains are either computed against the previous
+revision in the revlog or another revision (almost certainly one of
+the parents of the revision). Historically, deltas were computed against
+the previous revision. The *generaldelta* revlog feature flag (enabled
+by default in Mercurial 3.7) activates the mode where deltas are
+computed against an arbitrary revision (almost certainly a parent revision).
+
+File Storage
+============
+
+Revlogs logically consist of an index (metadata of entries) and
+revision data. This data may be stored together in a single file or in
+separate files. The mechanism used is indicated by the ``inline`` feature
+flag on the revlog.
+
+Mercurial's behavior is to use inline storage until a revlog reaches a
+certain size, at which point it will be converted to non-inline. The
+reason there is a size limit on inline storage is to establish an upper
+bound on how much data must be read to load the index. It would be a waste
+to read tens or hundreds of extra megabytes of data just to access the
+index data.
+
+The actual layout of revlog files on disk is governed by the repository's
+*store format*. Typically, a ``.i`` file represents the index revlog
+(possibly containing inline data) and a ``.d`` file holds the revision data.
+
+Revision Entries
+================
+
+Revision entries consist of an optional 1 byte header followed by an
+encoding of the revision data. The headers are as follows:
+
+\0 (0x00)
+   Revision data is the entirety of the entry, including this header.
+u (0x75)
+   Raw revision data follows.
+x (0x78)
+   zlib (RFC 1950) data.
+
+   The 0x78 value is actually the first byte of the zlib header (CMF byte).
+
+Hash Computation
+================
+
+The hash of the revision is stored in the index and is used both as a primary
+key and for data integrity verification.
+
+Currently, SHA-1 is the only supported hashing algorithm. To obtain the SHA-1
+hash of a revision:
+
+1. Hash the parent nodes
+2. Hash the fulltext of the revision
+
+The 20 byte node ids of the parents are fed into the hasher in ascending order.
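+
+A sketch of that computation::
+
+    import hashlib
+
+    def hashrevision(p1, p2, fulltext):
+        # The parent nodes are fed into the hasher in ascending byte order.
+        s = hashlib.sha1(min(p1, p2))
+        s.update(max(p1, p2))
+        s.update(fulltext)
+        return s.digest()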
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/wireprotocol.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,1277 @@
+The Mercurial wire protocol is a request-response based protocol
+with multiple wire representations.
+
+Each request is modeled as a command name, a dictionary of arguments, and
+optional raw input. Command arguments and their types are intrinsic
+properties of commands. So is the response type of the command. This means
+clients can't always send arbitrary arguments to servers and servers can't
+return multiple response types.
+
+The protocol is synchronous and does not support multiplexing (concurrent
+commands).
+
+Handshake
+=========
+
+It is required or common for clients to perform a *handshake* when connecting
+to a server. The handshake serves the following purposes:
+
+* Negotiating protocol/transport level options
+* Allowing the client to learn about server capabilities to influence
+  future requests
+* Ensuring the underlying transport channel is in a *clean* state
+
+An important goal of the handshake is to allow clients to use more modern
+wire protocol features. By default, clients must assume they are talking
+to an old version of Mercurial server (possibly even the very first
+implementation). So, clients should not attempt to call or utilize modern
+wire protocol features until they have confirmation that the server
+supports them. The handshake implementation is designed to allow both
+ends to utilize the latest set of features and capabilities with as
+few round trips as possible.
+
+The handshake mechanism varies by transport and protocol and is documented
+in the sections below.
+
+HTTP Protocol
+=============
+
+Handshake
+---------
+
+The client sends a ``capabilities`` command request (``?cmd=capabilities``)
+as soon as HTTP requests may be issued.
+
+By default, the server responds with a version 1 capabilities string, which
+the client parses to learn about the server's abilities. The ``Content-Type``
+for this response is ``application/mercurial-0.1`` or
+``application/mercurial-0.2`` depending on whether the client advertised
+support for version ``0.2`` in its request. (Clients aren't supposed to
+advertise support for ``0.2`` until the capabilities response indicates
+the server's support for that media type. However, a client could
+conceivably cache this metadata and issue the capabilities request in such
+a way as to elicit an ``application/mercurial-0.2`` response.)
+
+Clients wishing to switch to a newer API service may send an
+``X-HgUpgrade-<X>`` header containing a space-delimited list of API service
+names the client is capable of speaking. The request MUST also include an
+``X-HgProto-<X>`` header advertising a known serialization format for the
+response. ``cbor`` is currently the only defined serialization format.
+
+If the request contains these headers, the response ``Content-Type`` MAY
+be for a different media type. e.g. ``application/mercurial-cbor`` if the
+client advertises support for CBOR.
+
+The response MUST be deserializable to a map with the following keys:
+
+apibase
+   URL path to API services, relative to the repository root. e.g. ``api/``.
+
+apis
+   A map of API service names to API descriptors. An API descriptor contains
+   more details about that API. In the case of the HTTP Version 2 Transport,
+   it will be the normal response to a ``capabilities`` command.
+
+   Only the services advertised by the client that are also available on
+   the server are advertised.
+
+v1capabilities
+   The capabilities string that would be returned by a version 1 response.
+
+The client can then inspect the server-advertised APIs and decide which
+API to use, including continuing to use the HTTP Version 1 Transport.
+
+HTTP Version 1 Transport
+------------------------
+
+Commands are issued as HTTP/1.0 or HTTP/1.1 requests. Commands are
+sent to the base URL of the repository with the command name sent in
+the ``cmd`` query string parameter. e.g.
+``https://example.com/repo?cmd=capabilities``. The HTTP method is ``GET``
+or ``POST`` depending on the command and whether there is a request
+body.
+
+Command arguments can be sent in multiple ways.
+
+The simplest is as part of the URL query string, using
+``x-www-form-urlencoded`` encoding (see Python's ``urllib.urlencode()``).
+However, many servers impose length limitations on the URL, so this
+mechanism is typically only used if the server doesn't support other
+mechanisms.
+
+If the server supports the ``httpheader`` capability, command arguments can
+be sent in HTTP request headers named ``X-HgArg-<N>`` where ``<N>`` is an
+integer starting at 1. An ``x-www-form-urlencoded`` representation of the
+arguments is obtained. This full string is then split into chunks and sent
+in numbered ``X-HgArg-<N>`` headers. The maximum length of each HTTP header
+is defined by the server in the ``httpheader`` capability value, which defaults
+to ``1024``. The server reassembles the encoded arguments string by
+concatenating the ``X-HgArg-<N>`` headers and then URL decoding the result
+into a dictionary.
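+
+As an illustration, here is a minimal Python 3 sketch of this splitting
+scheme (the helper name is hypothetical, not part of Mercurial's API)::
+
+    import urllib.parse
+
+    def args_to_headers(args, headerlen=1024):
+        # URL encode the arguments into one string, then slice that
+        # string into X-HgArg-<N> sized chunks. ``headerlen`` mirrors
+        # the value advertised by the ``httpheader`` capability.
+        encoded = urllib.parse.urlencode(args)
+        headers = []
+        n = 1
+        pos = 0
+        while pos < len(encoded):
+            name = 'X-HgArg-%d' % n
+            # Leave room for the header name plus the ': ' separator.
+            room = headerlen - len(name) - 2
+            headers.append((name, encoded[pos:pos + room]))
+            pos += room
+            n += 1
+        return headers
+
+    print(args_to_headers({'heads': '0' * 40}, headerlen=32))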
+
+The list of ``X-HgArg-<N>`` headers should be added to the ``Vary`` request
+header to instruct caches to take these headers into consideration when caching
+requests.
+
+If the server supports the ``httppostargs`` capability, the client
+may send command arguments in the HTTP request body as part of an
+HTTP POST request. The command arguments will be URL encoded just like
+they would for sending them via HTTP headers. However, no splitting is
+performed: the raw arguments are included in the HTTP request body.
+
+The client sends an ``X-HgArgs-Post`` header with the string length of the
+encoded arguments data. Additional data may be included in the HTTP
+request body immediately following the argument data. The offset of the
+non-argument data is defined by the ``X-HgArgs-Post`` header. The
+``X-HgArgs-Post`` header is not required if there is no argument data.
+
+Additional command data can be sent as part of the HTTP request body. The
+default ``Content-Type`` when sending data is ``application/mercurial-0.1``.
+A ``Content-Length`` header is currently always sent.
+
+Example HTTP requests::
+
+    GET /repo?cmd=capabilities
+    X-HgArg-1: foo=bar&baz=hello%20world
+
+The request media type should be chosen based on server support. If the
+``httpmediatype`` server capability is present, the client should send
+the newest mutually supported media type. If this capability is absent,
+the client must assume the server only supports the
+``application/mercurial-0.1`` media type.
+
+The ``Content-Type`` HTTP response header identifies the response as coming
+from Mercurial and can also be used to signal an error has occurred.
+
+The ``application/mercurial-*`` media types indicate a generic Mercurial
+data type.
+
+The ``application/mercurial-0.1`` media type is raw Mercurial data. It is the
+predecessor of the format below.
+
+The ``application/mercurial-0.2`` media type is compression framed Mercurial
+data. The first byte of the payload indicates the length of the compression
+format identifier that follows. Next are N bytes indicating the compression
+format. e.g. ``zlib``. The remaining bytes are compressed according to that
+compression format. The decompressed data behaves the same as with
+``application/mercurial-0.1``.
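+
+A minimal Python sketch of decoding this framing, assuming only the
+``zlib`` and ``none`` formats are in play::
+
+    import zlib
+
+    def decode_mercurial_02(payload):
+        # First byte: length of the compression format identifier.
+        namelen = payload[0]
+        name = payload[1:1 + namelen]
+        data = payload[1 + namelen:]
+        if name == b'zlib':
+            return zlib.decompress(data)
+        if name == b'none':
+            return data
+        raise ValueError('unknown compression format: %r' % name)
+
+    framed = b'\x04zlib' + zlib.compress(b'hello')
+    print(decode_mercurial_02(framed))  # b'hello'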
+
+The ``application/hg-error`` media type indicates a generic error occurred.
+The content of the HTTP response body typically holds text describing the
+error.
+
+The ``application/mercurial-cbor`` media type indicates a CBOR payload
+and should be interpreted as identical to ``application/cbor``.
+
+Behavior of media types is further described in the ``Content Negotiation``
+section below.
+
+Clients should issue a ``User-Agent`` request header that identifies the client.
+The server should not use the ``User-Agent`` for feature detection.
+
+A command returning a ``string`` response issues an
+``application/mercurial-0.*`` media type and the HTTP response body contains
+the raw string value (after compression decoding, if used). A
+``Content-Length`` header is typically issued, but not required.
+
+A command returning a ``stream`` response issues an
+``application/mercurial-0.*`` media type, and the HTTP response typically
+uses *chunked transfer* (``Transfer-Encoding: chunked``).
+
+HTTP Version 2 Transport
+------------------------
+
+**Experimental - feature under active development**
+
+Version 2 of the HTTP protocol is exposed under the ``/api/*`` URL space.
+Its final API name is not yet formalized.
+
+Commands are triggered by sending HTTP POST requests against URLs of the
+form ``<permission>/<command>``, where ``<permission>`` is ``ro`` or
+``rw``, meaning read-only and read-write, respectively, and ``<command>``
+is a named wire protocol command.
+
+Non-POST request methods MUST be rejected by the server with an HTTP
+405 response.
+
+Commands that modify repository state in meaningful ways MUST NOT be
+exposed under the ``ro`` URL prefix. All available commands MUST be
+available under the ``rw`` URL prefix.
+
+Server administrators MAY implement blanket HTTP authentication keyed
+off the URL prefix. For example, a server may require authentication
+for all ``rw/*`` URLs and let unauthenticated requests to ``ro/*``
+URLs proceed. A server MAY issue an HTTP 401, 403, or 407 response
+in accordance with RFC 7235. Clients SHOULD recognize the HTTP Basic
+(RFC 7617) and Digest (RFC 7616) authentication schemes. Clients SHOULD
+make an attempt to recognize unknown schemes using the
+``WWW-Authenticate`` response header on a 401 response, as defined by
+RFC 7235.
+
+Read-only commands are accessible under ``rw/*`` URLs so clients can
+signal the intent of the operation very early in the connection
+lifecycle. For example, a ``push`` operation - which consists of
+various read-only commands mixed with at least one read-write command -
+can perform all commands against ``rw/*`` URLs so that any server-side
+authentication requirements are discovered upon attempting the first
+command - not potentially several commands into the exchange. This
+allows clients to fail faster or prompt for credentials as soon as the
+exchange takes place. This provides a better end-user experience.
+
+Requests to unknown commands or URLs result in an HTTP 404.
+TODO formally define response type, how error is communicated, etc.
+
+HTTP request and response bodies use the ``hgrpc`` protocol for media
+exchange. (See :hg:`help internals.wireprotocolrpc` for details of
+the protocol.) The entirety of the HTTP message body is 0 or more frames
+as defined by this protocol.
+
+Clients and servers MUST advertise the ``TBD`` media type via the
+``Content-Type`` request and response headers. In addition, clients MUST
+advertise this media type value in their ``Accept`` request header in all
+requests.
+TODO finalize the media type. For now, it is defined in wireprotoserver.py.
+
+Servers receiving requests without an ``Accept`` header SHOULD respond with
+an HTTP 406.
+
+Servers receiving requests with an invalid ``Content-Type`` header SHOULD
+respond with an HTTP 415.
+
+The command to run is specified in the POST payload as defined by ``hgrpc``.
+This is redundant with data already encoded in the URL. This is by design,
+so server operators can gain a better understanding of server activity
+merely by looking at HTTP access logs.
+
+In most circumstances, the command specified in the URL MUST match
+the command specified in the frame-based payload or the server will
+respond with an error. The exception to this is the special
+``multirequest`` URL. (See below.) In addition, HTTP requests
+are limited to one command invocation. The exception is the special
+``multirequest`` URL.
+
+The ``multirequest`` command endpoints (``ro/multirequest`` and
+``rw/multirequest``) are special in that they allow the execution of
+*any* command and allow the execution of multiple commands. If the
+HTTP request issues multiple commands across multiple frames, all
+issued commands will be processed by the server. Per the defined
+behavior of ``hgrpc``, commands may be issued interleaved and responses
+may come back in a different order than they were issued. Clients MUST
+be able to deal with this.
+
+SSH Protocol
+============
+
+Handshake
+---------
+
+For all clients, the handshake consists of the client sending 1 or more
+commands to the server using version 1 of the transport. Servers respond
+to commands they know how to respond to and send an empty response (``0\n``)
+for unknown commands (per standard behavior of version 1 of the transport).
+Clients then typically look for a response to the newest sent command to
+determine which transport version to use and what the available features for
+the connection and server are.
+
+Preceding any response from client-issued commands, the server may print
+non-protocol output. It is common for SSH servers to print banners, message
+of the day announcements, etc. when clients connect. It is assumed that any
+such *banner* output will precede any Mercurial server output. So clients
+must be prepared to handle server output on initial connect that isn't
+in response to any client-issued command and doesn't conform to Mercurial's
+wire protocol. This *banner* output should only be on stdout. However,
+some servers may send output on stderr.
+
+Pre 0.9.1 clients issue a ``between`` command with the ``pairs`` argument
+having the value
+``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``.
+
+The ``between`` command has been supported since the original Mercurial
+SSH server. Requesting the empty range will return a ``\n`` string response,
+which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline
+followed by the value, which happens to be a newline).
+
+For pre 0.9.1 clients and all servers, the exchange looks like::
+
+   c: between\n
+   c: pairs 81\n
+   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+   s: 1\n
+   s: \n
+
+0.9.1+ clients send a ``hello`` command (with no arguments) before the
+``between`` command. The response to this command allows clients to
+discover server capabilities and settings.
+
+An example exchange between 0.9.1+ clients and a ``hello`` aware server looks
+like::
+
+   c: hello\n
+   c: between\n
+   c: pairs 81\n
+   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+   s: 324\n
+   s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+   s: 1\n
+   s: \n
+
+And a similar scenario but with servers sending a banner on connect::
+
+   c: hello\n
+   c: between\n
+   c: pairs 81\n
+   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+   s: welcome to the server\n
+   s: if you find any issues, email someone@somewhere.com\n
+   s: 324\n
+   s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+   s: 1\n
+   s: \n
+
+Note that output from the ``hello`` command is terminated by a ``\n``. This is
+part of the response payload, not a newline appended by the wire protocol
+after responses. In other words, the length of the response includes the
+trailing ``\n``.
+
+Clients supporting version 2 of the SSH transport send a line beginning
+with ``upgrade`` before the ``hello`` and ``between`` commands. The line
+(which isn't a well-formed command line because it doesn't consist of a
+single command name) serves to both communicate the client's intent to
+switch to transport version 2 (transports are version 1 by default) as
+well as to advertise the client's transport-level capabilities so the
+server may satisfy that request immediately.
+
+The upgrade line has the form::
+
+    upgrade <token> <transport capabilities>
+
+That is the literal string ``upgrade`` followed by a space, followed by
+a randomly generated string, followed by a space, followed by a string
+denoting the client's transport capabilities.
+
+The token can be anything, but a random UUID is recommended. (Version 4
+UUIDs are preferred because version 1 UUIDs can leak the client's MAC
+address.)
+
+The transport capabilities string is a URL/percent encoded string
+containing key-value pairs defining the client's transport-level
+capabilities. The following capabilities are defined:
+
+proto
+   A comma-delimited list of transport protocol versions the client
+   supports. e.g. ``ssh-v2``.
+
+If the server does not recognize the ``upgrade`` line, it should issue
+an empty response and continue processing the ``hello`` and ``between``
+commands. Here is an example handshake between a version 2 aware client
+and a non version 2 aware server::
+
+   c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
+   c: hello\n
+   c: between\n
+   c: pairs 81\n
+   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+   s: 0\n
+   s: 324\n
+   s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+   s: 1\n
+   s: \n
+
+(The initial ``0\n`` line from the server indicates an empty response to
+the unknown ``upgrade ...`` command/line.)
+
+If the server recognizes the ``upgrade`` line and is willing to satisfy that
+upgrade request, it replies with a payload of the following form::
+
+   upgraded <token> <transport name>\n
+
+This line is the literal string ``upgraded``, a space, the token that was
+specified by the client in its ``upgrade ...`` request line, a space, and the
+name of the transport protocol that was chosen by the server. The transport
+name MUST match one of the names the client specified in the ``proto`` field
+of its ``upgrade ...`` request line.
+
+If a server issues an ``upgraded`` response, it MUST also read and ignore
+the lines associated with the ``hello`` and ``between`` command requests
+that were issued by the client. It is assumed that the negotiated transport
+will respond with equivalent requested information following the transport
+handshake.
+
+All data following the ``\n`` terminating the ``upgraded`` line is the
+domain of the negotiated transport. It is common for the data immediately
+following to contain additional metadata about the state of the transport and
+the server. However, this isn't strictly speaking part of the transport
+handshake and isn't covered by this section.
+
+Here is an example handshake between a version 2 aware client and a version
+2 aware server::
+
+   c:  upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
+   c:  hello\n
+   c:  between\n
+   c:  pairs 81\n
+   c:  0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+   s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
+   s: <additional transport specific data>
+
+The client-issued token that is echoed in the response provides a more
+resilient mechanism for differentiating *banner* output from Mercurial
+output. In version 1, properly formatted banner output could get confused
+for Mercurial server output. By submitting a randomly generated token
+that is then present in the response, the client can look for that token
+in response lines and have reasonable certainty that the line did not
+originate from a *banner* message.
+
+SSH Version 1 Transport
+-----------------------
+
+The SSH transport (version 1) is a custom text-based protocol suitable for
+use over any bi-directional stream transport. It is most commonly used with
+SSH.
+
+An SSH transport server can be started with ``hg serve --stdio``. The stdin,
+stderr, and stdout file descriptors of the started process are used to exchange
+data. When Mercurial connects to a remote server over SSH, it actually starts
+a ``hg serve --stdio`` process on the remote server.
+
+Commands are issued by sending the command name followed by a trailing newline
+``\n`` to the server. e.g. ``capabilities\n``.
+
+Command arguments are sent in the following format::
+
+    <argument> <length>\n<value>
+
+That is, the argument string name followed by a space followed by the
+integer length of the value (expressed as a string) followed by a newline
+(``\n``) followed by the raw argument value.
+
+Dictionary arguments are encoded differently::
+
+    <argument> <# elements>\n
+    <key1> <length1>\n<value1>
+    <key2> <length2>\n<value2>
+    ...
+
+Non-argument data is sent immediately after the final argument value. It is
+encoded in chunks::
+
+    <length>\n<data>
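+
+A minimal Python sketch of this encoding (the helper name is
+hypothetical, not Mercurial's actual implementation)::
+
+    def encode_command(name, args):
+        out = [name + b'\n']
+        for arg, value in args.items():
+            if isinstance(value, dict):
+                # Dictionaries send the element count, then each
+                # key/value in the standard length-prefixed form.
+                out.append(b'%s %d\n' % (arg, len(value)))
+                for k, v in value.items():
+                    out.append(b'%s %d\n%s' % (k, len(v), v))
+            else:
+                out.append(b'%s %d\n%s' % (arg, len(value), value))
+        return b''.join(out)
+
+    nullrange = b'-'.join([b'0' * 40] * 2)
+    print(encode_command(b'between', {b'pairs': nullrange}))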
+
+Each command declares a list of supported arguments and their types. If a
+client sends an unknown argument to the server, the server should abort
+immediately. The special argument ``*`` in a command's definition indicates
+that all argument names are allowed.
+
+The definition of supported arguments and types is initially made when a
+new command is implemented. The client and server must initially independently
+agree on the arguments and their types. This initial set of arguments can be
+supplemented through the presence of *capabilities* advertised by the server.
+
+Each command has a defined expected response type.
+
+A ``string`` response type is a length framed value. The response consists of
+the string encoded integer length of a value followed by a newline (``\n``)
+followed by the value. Empty values are allowed (and are represented as
+``0\n``).
+
+A ``stream`` response type consists of raw bytes of data. There is no framing.
+
+A generic error response type is also supported. It consists of an error
+message written to ``stderr`` followed by ``\n-\n``. In addition, ``\n`` is
+written to ``stdout``.
+
+If the server receives an unknown command, it will send an empty ``string``
+response.
+
+The server terminates if it receives an empty command (a ``\n`` character).
+
+If the server announces support for the ``protocaps`` capability, the client
+should issue a ``protocaps`` command after the initial handshake to announce
+its own capabilities. The client capabilities are persistent.
+
+SSH Version 2 Transport
+-----------------------
+
+**Experimental and under development**
+
+Version 2 of the SSH transport behaves identically to version 1 of the SSH
+transport with the exception of handshake semantics. See above for how
+version 2 of the SSH transport is negotiated.
+
+Immediately following the ``upgraded`` line signaling a switch to version
+2 of the SSH protocol, the server automatically sends additional details
+about the capabilities of the remote server. This has the form::
+
+   <integer length of value>\n
+   capabilities: ...\n
+
+e.g.::
+
+   s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
+   s: 240\n
+   s: capabilities: known getbundle batch ...\n
+
+Following capabilities advertisement, the peers communicate using version
+1 of the SSH transport.
+
+Capabilities
+============
+
+Servers advertise supported wire protocol features. This allows clients to
+probe for server features before blindly calling a command or passing a
+specific argument.
+
+The server's features are exposed via a *capabilities* string. This is a
+space-delimited string of tokens/features. Some features are single words
+like ``lookup`` or ``batch``. Others are complicated key-value pairs
+advertising sub-features. e.g. ``httpheader=2048``. When complex, non-word
+values are used, each feature name can define its own encoding of sub-values.
+Comma-delimited and ``x-www-form-urlencoded`` values are common.
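+
+A minimal Python sketch of parsing such a string (the helper name is
+hypothetical; decoding of sub-values is left feature-specific)::
+
+    def parse_capabilities(caps):
+        # Bare tokens map to None; key=value tokens keep their raw,
+        # feature-specific value.
+        parsed = {}
+        for token in caps.split():
+            if b'=' in token:
+                key, value = token.split(b'=', 1)
+                parsed[key] = value
+            else:
+                parsed[token] = None
+        return parsed
+
+    print(parse_capabilities(b'lookup batch httpheader=1024'))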
+
+The following sections document the capabilities defined by the canonical
+Mercurial server implementation.
+
+batch
+-----
+
+Whether the server supports the ``batch`` command.
+
+This capability/command was introduced in Mercurial 1.9 (released July 2011).
+
+branchmap
+---------
+
+Whether the server supports the ``branchmap`` command.
+
+This capability/command was introduced in Mercurial 1.3 (released July 2009).
+
+bundle2-exp
+-----------
+
+Precursor to ``bundle2`` capability that was used before bundle2 was a
+stable feature.
+
+This capability was introduced in Mercurial 3.0 behind an experimental
+flag. This capability should not be observed in the wild.
+
+bundle2
+-------
+
+Indicates whether the server supports the ``bundle2`` data exchange format.
+
+The value of the capability is a URL quoted, newline (``\n``) delimited
+list of keys or key-value pairs.
+
+A key is simply a URL encoded string.
+
+A key-value pair is a URL encoded key separated from a URL encoded value by
+an ``=``. If the value is a list, elements are delimited by a ``,`` after
+URL encoding.
+
+For example, say we have the values::
+
+  {'HG20': [], 'changegroup': ['01', '02'], 'digests': ['sha1', 'sha512']}
+
+We would first construct a string::
+
+  HG20\nchangegroup=01,02\ndigests=sha1,sha512
+
+We would then URL quote this string::
+
+  HG20%0Achangegroup%3D01%2C02%0Adigests%3Dsha1%2Csha512
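+
+A Python sketch of this encoding. It is a simplification in that it
+quotes the assembled string in one pass rather than URL encoding keys
+and values individually, which is equivalent for this example::
+
+    import urllib.parse
+
+    def encode_bundle2_caps(caps):
+        chunks = []
+        for key, values in sorted(caps.items()):
+            if values:
+                chunks.append('%s=%s' % (key, ','.join(values)))
+            else:
+                chunks.append(key)
+        # '=' ',' and '\n' are all percent-encoded by quote().
+        return urllib.parse.quote('\n'.join(chunks))
+
+    caps = {'HG20': [], 'changegroup': ['01', '02'],
+            'digests': ['sha1', 'sha512']}
+    print(encode_bundle2_caps(caps))
+    # HG20%0Achangegroup%3D01%2C02%0Adigests%3Dsha1%2Csha512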
+
+This capability was introduced in Mercurial 3.4 (released May 2015).
+
+changegroupsubset
+-----------------
+
+Whether the server supports the ``changegroupsubset`` command.
+
+This capability was introduced in Mercurial 0.9.2 (released December
+2006).
+
+This capability was introduced at the same time as the ``lookup``
+capability/command.
+
+compression
+-----------
+
+Declares support for negotiating compression formats.
+
+Presence of this capability indicates the server supports dynamic selection
+of compression formats based on the client request.
+
+Servers advertising this capability are required to support the
+``application/mercurial-0.2`` media type in response to commands returning
+streams. Servers may support this media type on any command.
+
+The value of the capability is a comma-delimited list of strings declaring
+supported compression formats. The order of the compression formats is in
+server-preferred order, most preferred first.
+
+The identifiers used by the official Mercurial distribution are:
+
+bzip2
+   bzip2
+none
+   uncompressed / raw data
+zlib
+   zlib (no gzip header)
+zstd
+   zstd
+
+This capability was introduced in Mercurial 4.1 (released February 2017).
+
+getbundle
+---------
+
+Whether the server supports the ``getbundle`` command.
+
+This capability was introduced in Mercurial 1.9 (released July 2011).
+
+httpheader
+----------
+
+Whether the server supports receiving command arguments via HTTP request
+headers.
+
+The value of the capability is an integer describing the max header
+length that clients should send. Clients should ignore any content after a
+comma in the value, as this is reserved for future use.
+
+This capability was introduced in Mercurial 1.9 (released July 2011).
+
+httpmediatype
+-------------
+
+Indicates which HTTP media types (``Content-Type`` header) the server is
+capable of receiving and sending.
+
+The value of the capability is a comma-delimited list of strings identifying
+support for media type and transmission direction. The following strings may
+be present:
+
+0.1rx
+   Indicates server support for receiving ``application/mercurial-0.1`` media
+   types.
+
+0.1tx
+   Indicates server support for sending ``application/mercurial-0.1`` media
+   types.
+
+0.2rx
+   Indicates server support for receiving ``application/mercurial-0.2`` media
+   types.
+
+0.2tx
+   Indicates server support for sending ``application/mercurial-0.2`` media
+   types.
+
+minrx=X
+   Minimum media type version the server is capable of receiving. Value is a
+   string like ``0.2``.
+
+   This capability can be used by servers to limit connections from legacy
+   clients not using the latest supported media type. However, only clients
+   with knowledge of this capability will know to consult this value. This
+   capability is present so the client may issue a more user-friendly error
+   when the server has locked out a legacy client.
+
+mintx=X
+   Minimum media type version the server is capable of sending. Value is a
+   string like ``0.1``.
+
+Servers advertising support for the ``application/mercurial-0.2`` media type
+should also advertise the ``compression`` capability.
+
+This capability was introduced in Mercurial 4.1 (released February 2017).
+
+httppostargs
+------------
+
+**Experimental**
+
+Indicates that the server supports and prefers clients send command arguments
+via an HTTP POST request as part of the request body.
+
+This capability was introduced in Mercurial 3.8 (released May 2016).
+
+known
+-----
+
+Whether the server supports the ``known`` command.
+
+This capability/command was introduced in Mercurial 1.9 (released July 2011).
+
+lfs
+---
+
+Indicates that the LFS extension is enabled on the server.  It makes no claims
+about the repository actually having LFS blobs committed to it.
+
+This capability was introduced by the LFS extension in Mercurial 4.5 (released
+Feb 2018).
+
+lfs-serve
+---------
+
+Indicates that the LFS extension is enabled on the server, and LFS blobs are
+committed to the remote repository.  (Specifically, it indicates that the 'lfs'
+requirement is present in the remote repository.)
+
+This capability was introduced by the LFS extension in Mercurial 4.8 (released
+Nov 2018).
+
+lookup
+------
+
+Whether the server supports the ``lookup`` command.
+
+This capability was introduced in Mercurial 0.9.2 (released December
+2006).
+
+This capability was introduced at the same time as the ``changegroupsubset``
+capability/command.
+
+partial-pull
+------------
+
+Indicates that the client can deal with partial answers to pull requests
+by repeating the request.
+
+If this parameter is not advertised, the server will not send pull bundles.
+
+This client capability was introduced in Mercurial 4.6.
+
+protocaps
+---------
+
+Whether the server supports the ``protocaps`` command for SSH V1 transport.
+
+This capability was introduced in Mercurial 4.6.
+
+pushkey
+-------
+
+Whether the server supports the ``pushkey`` and ``listkeys`` commands.
+
+This capability was introduced in Mercurial 1.6 (released July 2010).
+
+standardbundle
+--------------
+
+**Unsupported**
+
+This capability was introduced during the Mercurial 0.9.2 development cycle in
+2006. It was never present in a release, as it was replaced by the ``unbundle``
+capability. This capability should not be encountered in the wild.
+
+stream-preferred
+----------------
+
+If present, the server prefers that clients clone using the streaming clone
+protocol (``hg clone --stream``) rather than the standard
+changegroup/bundle based protocol.
+
+This capability was introduced in Mercurial 2.2 (released May 2012).
+
+streamreqs
+----------
+
+Indicates whether the server supports *streaming clones* and the *requirements*
+that clients must support to receive it.
+
+If present, the server supports the ``stream_out`` command, which transmits
+raw revlogs from the repository instead of changegroups. This provides a faster
+cloning mechanism at the expense of using more bandwidth.
+
+The value of this capability is a comma-delimited list of repo format
+*requirements*. These are requirements that impact the reading of data in
+the ``.hg/store`` directory. An example value is
+``streamreqs=generaldelta,revlogv1`` indicating the server repo requires
+the ``revlogv1`` and ``generaldelta`` requirements.
+
+If the only format requirement is ``revlogv1``, the server may expose the
+``stream`` capability instead of the ``streamreqs`` capability.
+
+This capability was introduced in Mercurial 1.7 (released November 2010).
+
+stream
+------
+
+Whether the server supports *streaming clones* from ``revlogv1`` repos.
+
+If present, the server supports the ``stream_out`` command, which transmits
+raw revlogs from the repository instead of changegroups. This provides a faster
+cloning mechanism at the expense of using more bandwidth.
+
+This capability was introduced in Mercurial 0.9.1 (released July 2006).
+
+When initially introduced, the value of the capability was the numeric
+revlog revision. e.g. ``stream=1``. This indicates the changegroup is using
+``revlogv1``. This simple integer value wasn't powerful enough, so the
+``streamreqs`` capability was invented to handle cases where the repo
+requirements have more than just ``revlogv1``. Newer servers omit the
+``=1`` since it was the only value supported and the value of ``1`` can
+be implied by clients.
+
+unbundlehash
+------------
+
+Whether the ``unbundle`` command supports receiving a hash of all the
+heads instead of a list.
+
+For more, see the documentation for the ``unbundle`` command.
+
+This capability was introduced in Mercurial 1.9 (released July 2011).
+
+unbundle
+--------
+
+Whether the server supports pushing via the ``unbundle`` command.
+
+This capability/command has been present since Mercurial 0.9.1 (released
+July 2006).
+
+Mercurial 0.9.2 (released December 2006) added values to the capability
+indicating which bundle types the server supports receiving. This value is a
+comma-delimited list. e.g. ``HG10GZ,HG10BZ,HG10UN``. The order of values
+reflects the priority/preference of that type, where the first value is the
+most preferred type.
+
+Content Negotiation
+===================
+
+The wire protocol has some mechanisms to help peers determine what content
+types and encoding the other side will accept. Historically, these mechanisms
+have been built into commands themselves because most commands only send a
+well-defined response type and only certain commands needed to support
+functionality like compression.
+
+Currently, only the HTTP version 1 transport supports content negotiation
+at the protocol layer.
+
+HTTP requests advertise supported response formats via the ``X-HgProto-<N>``
+request header, where ``<N>`` is an integer starting at 1 allowing the logical
+value to span multiple headers. This value consists of a list of
+space-delimited parameters. Each parameter denotes a feature or capability.
+
+The following parameters are defined:
+
+0.1
+   Indicates the client supports receiving ``application/mercurial-0.1``
+   responses.
+
+0.2
+   Indicates the client supports receiving ``application/mercurial-0.2``
+   responses.
+
+cbor
+   Indicates the client supports receiving ``application/mercurial-cbor``
+   responses.
+
+   (Only intended to be used with version 2 transports.)
+
+comp
+   Indicates compression formats the client can decode. Value is a list of
+   comma delimited strings identifying compression formats ordered from
+   most preferential to least preferential. e.g. ``comp=zstd,zlib,none``.
+
+   This parameter does not have an effect if only the ``0.1`` parameter
+   is defined, as support for ``application/mercurial-0.2`` or greater is
+   required to use arbitrary compression formats.
+
+   If this parameter is not advertised, the server interprets this as
+   equivalent to ``zlib,none``.
+
+Clients may choose to only send this header if the ``httpmediatype``
+server capability is present, as currently all server-side features
+consulting this header require the client to opt in to new protocol features
+advertised via the ``httpmediatype`` capability.
+
+A server that doesn't receive an ``X-HgProto-<N>`` header should infer a
+value of ``0.1``. This is compatible with legacy clients.
+
+A server receiving a request indicating support for multiple media type
+versions may respond with any of the supported media types. Not all servers
+may support all media types on all commands.
+
+Commands
+========
+
+This section contains a list of all wire protocol commands implemented by
+the canonical Mercurial server.
+
+See :hg:`help internals.wireprotocolv2` for information on commands exposed
+to the frame-based protocol.
+
+batch
+-----
+
+Issue multiple commands while sending a single command request. The purpose
+of this command is to allow a client to issue multiple commands while avoiding
+multiple round trips to the server, thereby enabling commands to complete
+more quickly.
+
+The command accepts a ``cmds`` argument that contains a list of commands to
+execute.
+
+The value of ``cmds`` is a ``;`` delimited list of strings. Each string has the
+form ``<command> <arguments>``. That is, the command name followed by a space
+followed by an argument string.
+
+The argument string is a ``,`` delimited list of ``<key>=<value>`` values
+corresponding to command arguments. Both the argument name and value are
+escaped using a special substitution map::
+
+   : -> :c
+   , -> :o
+   ; -> :s
+   = -> :e
+
+The response type for this command is ``string``. The value contains a
+``;`` delimited list of responses for each requested command. Each value
+in this list is escaped using the same substitution map used for arguments.
+
+If an error occurs, the generic error response may be sent.
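+
+A minimal Python sketch of the escaping scheme described above (the
+helper names are hypothetical)::
+
+    def escapearg(plain):
+        # Order matters: ':' must be handled first because the
+        # substitutions themselves introduce ':' characters.
+        return (plain
+                .replace(':', ':c')
+                .replace(',', ':o')
+                .replace(';', ':s')
+                .replace('=', ':e'))
+
+    def unescapearg(escaped):
+        # Reverse order of escapearg().
+        return (escaped
+                .replace(':e', '=')
+                .replace(':s', ';')
+                .replace(':o', ',')
+                .replace(':c', ':'))
+
+    assert unescapearg(escapearg('a=b;c,d:e')) == 'a=b;c,d:e'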
+
+between
+-------
+
+(Legacy command used for discovery in old clients)
+
+Obtain nodes between pairs of nodes.
+
+The ``pairs`` argument contains a space-delimited list of ``-`` delimited
+hex node pairs. e.g.::
+
+   a072279d3f7fd3a4aa7ffa1a5af8efc573e1c896-6dc58916e7c070f678682bfe404d2e2d68291a18
+
+Return type is a ``string``. Value consists of lines corresponding to each
+requested range. Each line contains a space-delimited list of hex nodes.
+A newline ``\n`` terminates each line, including the last one.
+
+branchmap
+---------
+
+Obtain heads in named branches.
+
+Accepts no arguments. Return type is a ``string``.
+
+Return value contains lines with URL encoded branch names followed by a space
+followed by a space-delimited list of hex nodes of heads on that branch.
+e.g.::
+
+    default a072279d3f7fd3a4aa7ffa1a5af8efc573e1c896 6dc58916e7c070f678682bfe404d2e2d68291a18
+    stable baae3bf31522f41dd5e6d7377d0edd8d1cf3fccc
+
+There is no trailing newline.
+
+branches
+--------
+
+(Legacy command used for discovery in old clients. Clients with ``getbundle``
+use the ``known`` and ``heads`` commands instead.)
+
+Obtain ancestor changesets of specific nodes back to a branch point.
+
+Despite the name, this command has nothing to do with Mercurial named branches.
+Instead, it is related to DAG branches.
+
+The command accepts a ``nodes`` argument, which is a string of space-delimited
+hex nodes.
+
+For each node requested, the server will find the first ancestor node that is
+a DAG root or is a merge.
+
+Return type is a ``string``. Return value contains lines with result data for
+each requested node. Each line contains space-delimited nodes followed by a
+newline (``\n``). The 4 nodes reported on each line correspond to the requested
+node, the ancestor node found, and its 2 parent nodes (which may be the null
+node).
+
+capabilities
+------------
+
+Obtain the capabilities string for the repo.
+
+Unlike the ``hello`` command, the capabilities string is not prefixed.
+There is no trailing newline.
+
+This command does not accept any arguments. Return type is a ``string``.
+
+This command was introduced in Mercurial 0.9.1 (released July 2006).
+
+changegroup
+-----------
+
+(Legacy command: use ``getbundle`` instead)
+
+Obtain a changegroup version 1 with data for changesets that are
+descendants of client-specified changesets.
+
+The ``roots`` argument contains a list of space-delimited hex nodes.
+
+The server responds with a changegroup version 1 containing all
+changesets between the requested root/base nodes and the repo's head nodes
+at the time of the request.
+
+The return type is a ``stream``.
+
+changegroupsubset
+-----------------
+
+(Legacy command: use ``getbundle`` instead)
+
+Obtain a changegroup version 1 with data for changesets between
+client-specified base and head nodes.
+
+The ``bases`` argument contains a list of space-delimited hex nodes.
+The ``heads`` argument contains a list of space-delimited hex nodes.
+
+The server responds with a changegroup version 1 containing all
+changesets between the requested base and head nodes at the time of the
+request.
+
+The return type is a ``stream``.
+
+clonebundles
+------------
+
+Obtain a manifest of bundle URLs available to seed clones.
+
+Each returned line contains a URL followed by metadata. See the
+documentation in the ``clonebundles`` extension for more.
+
+The return type is a ``string``.
+
+getbundle
+---------
+
+Obtain a bundle containing repository data.
+
+This command accepts the following arguments:
+
+heads
+   List of space-delimited hex nodes of heads to retrieve.
+common
+   List of space-delimited hex nodes that the client has in common with the
+   server.
+obsmarkers
+   Boolean indicating whether to include obsolescence markers as part
+   of the response. Only works with bundle2.
+bundlecaps
+   Comma-delimited set of strings defining client bundle capabilities.
+listkeys
+   Comma-delimited list of strings of ``pushkey`` namespaces. For each
+   namespace listed, a bundle2 part will be included with the content of
+   that namespace.
+cg
+   Boolean indicating whether changegroup data is requested.
+cbattempted
+   Boolean indicating whether the client attempted to use the *clone bundles*
+   feature before performing this request.
+bookmarks
+   Boolean indicating whether bookmark data is requested.
+phases
+   Boolean indicating whether phases data is requested.
+
+The return type on success is a ``stream`` where the value is a bundle.
+On the HTTP version 1 transport, the response is zlib compressed.
+
+If an error occurs, a generic error response can be sent.
+
+Unless the client sends a false value for the ``cg`` argument, the returned
+bundle contains a changegroup with the nodes between the specified ``common``
+and ``heads`` nodes. Depending on the command arguments, the type and content
+of the returned bundle can vary significantly.
+
+The default behavior is for the server to send a raw changegroup version
+``01`` response.
+
+If the ``bundlecaps`` provided by the client contain a value beginning
+with ``HG2``, a bundle2 will be returned. The bundle2 data may contain
+additional repository data, such as ``pushkey`` namespace values.
+
+heads
+-----
+
+Returns a list of space-delimited hex nodes of repository heads followed
+by a newline. e.g.
+``a9eeb3adc7ddb5006c088e9eda61791c777cbf7c 31f91a3da534dc849f0d6bfc00a395a97cf218a1\n``
+
+This command does not accept any arguments. The return type is a ``string``.
+
+hello
+-----
+
+Returns lines describing interesting things about the server in an
+RFC-822-like format.
+
+Currently, the only line defines the server capabilities. It has the form::
+
+    capabilities: <value>
+
+See above for more about the capabilities string.
+
+SSH clients typically issue this command as soon as a connection is
+established.
+
+This command does not accept any arguments. The return type is a ``string``.
+
+This command was introduced in Mercurial 0.9.1 (released July 2006).
+
+listkeys
+--------
+
+List values in a specified ``pushkey`` namespace.
+
+The ``namespace`` argument defines the pushkey namespace to operate on.
+
+The return type is a ``string``. The value is an encoded dictionary of keys.
+
+Key-value pairs are delimited by newlines (``\n``). Within each line, keys and
+values are separated by a tab (``\t``). Keys and values are both strings.
+
+lookup
+------
+
+Try to resolve a value to a known repository revision.
+
+The ``key`` argument is converted from bytes to an
+``encoding.localstr`` instance then passed into
+``localrepository.__getitem__`` in an attempt to resolve it.
+
+The return type is a ``string``.
+
+Upon successful resolution, returns ``1 <hex node>\n``. On failure,
+returns ``0 <error string>\n``. e.g.::
+
+   1 273ce12ad8f155317b2c078ec75a4eba507f1fba\n
+
+   0 unknown revision 'foo'\n
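+
+A minimal Python sketch of parsing such a response (the helper name is
+hypothetical)::
+
+    def parse_lookup(response):
+        # '1 <hex node>\n' on success, '0 <error string>\n' on failure.
+        flag, rest = response.rstrip(b'\n').split(b' ', 1)
+        return flag == b'1', rest
+
+    ok, node = parse_lookup(b'1 273ce12ad8f155317b2c078ec75a4eba507f1fba\n')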
+
+known
+-----
+
+Determine whether multiple nodes are known.
+
+The ``nodes`` argument is a list of space-delimited hex nodes to check
+for existence.
+
+The return type is ``string``.
+
+Returns a string consisting of ``0``s and ``1``s indicating whether nodes
+are known. If the Nth node specified in the ``nodes`` argument is known,
+a ``1`` will be returned at byte offset N. If the node isn't known, ``0``
+will be present at byte offset N.
+
+There is no trailing newline.
+
+protocaps
+---------
+
+Notify the server about the client capabilities in the SSH V1 transport
+protocol.
+
+The ``caps`` argument is a space-delimited list of capabilities.
+
+The server will reply with the string ``OK``.
+
+pushkey
+-------
+
+Set a value using the ``pushkey`` protocol.
+
+Accepts arguments ``namespace``, ``key``, ``old``, and ``new``, which
+correspond to the pushkey namespace to operate on, the key within that
+namespace to change, the old value (which may be empty), and the new value.
+All arguments are string types.
+
+The return type is a ``string``. The value depends on the transport protocol.
+
+The SSH version 1 transport sends a string encoded integer followed by a
+newline (``\n``) which indicates operation result. The server may send
+additional output on the ``stderr`` stream that should be displayed to the
+user.
+
+The HTTP version 1 transport sends a string encoded integer followed by a
+newline followed by additional server output that should be displayed to
+the user. This may include output from hooks, etc.
+
+The integer result varies by namespace. ``0`` means an error has occurred
+and there should be additional output to display to the user.
+
+stream_out
+----------
+
+Obtain *streaming clone* data.
+
+The return type is either a ``string`` or a ``stream``, depending on
+whether the request was fulfilled properly.
+
+A return value of ``1\n`` indicates the server is not configured to serve
+this data. A client seeing this may not have verified that the
+``stream`` capability is set before making the request.
+
+A return value of ``2\n`` indicates the server was unable to lock the
+repository to generate data.
+
+All other responses are a ``stream`` of bytes. The first line of this data
+contains 2 space-delimited integers corresponding to the path count and
+payload size, respectively::
+
+    <path count> <payload size>\n
+
+The ``<payload size>`` is the total size of path data: it does not include
+the size of the per-path header lines.
+
+Following that header are ``<path count>`` entries. Each entry consists of a
+line with metadata followed by raw revlog data. The line consists of::
+
+    <store path>\0<size>\n
+
+The ``<store path>`` is the encoded store path of the data that follows.
+``<size>`` is the amount of data for this store path/revlog that follows the
+newline.
+
+There is no trailer to indicate end of data. Instead, the client should stop
+reading after ``<path count>`` entries are consumed.
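+
+A minimal Python sketch of consuming a successful response (the helper
+name is hypothetical; it assumes the ``1\n``/``2\n`` error strings were
+already ruled out)::
+
+    import io
+
+    def parse_stream_out(fh):
+        # First line: '<path count> <payload size>\n'.
+        filecount, bytecount = map(
+            int, fh.readline().decode('ascii').split())
+        for _ in range(filecount):
+            # Per-entry header: '<store path>\0<size>\n'.
+            path, size = fh.readline().rstrip(b'\n').split(b'\0')
+            yield path, fh.read(int(size.decode('ascii')))
+
+    sample = io.BytesIO(b'1 4\ndata/foo.i\x004\nABCD')
+    print(list(parse_stream_out(sample)))  # [(b'data/foo.i', b'ABCD')]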
+
+unbundle
+--------
+
+Send a bundle containing data (usually changegroup data) to the server.
+
+Accepts the argument ``heads``, which is a space-delimited list of hex nodes
+corresponding to server repository heads observed by the client. This is used
+to detect race conditions and abort push operations before a server performs
+too much work or a client transfers too much data.
+
+The request payload consists of a bundle to be applied to the repository,
+as if :hg:`unbundle` had been called.
+
+In most scenarios, a special ``push response`` type is returned. This type
+contains an integer describing the change in heads as a result of the
+operation. A value of ``0`` indicates nothing changed. ``1`` means the number
+of heads remained the same. Values ``2`` and larger indicate the number of
+added heads minus 1. e.g. ``3`` means 2 heads were added. Negative values
+indicate the number of fewer heads, also off by 1. e.g. ``-2`` means there
+is 1 fewer head.
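+
+A minimal Python sketch of interpreting this integer (the helper name
+is hypothetical)::
+
+    def describe_push_result(result):
+        # The integer encodes the change in heads, offset by 1.
+        if result == 0:
+            return 'nothing changed'
+        if result == 1:
+            return 'head count unchanged'
+        if result > 1:
+            return '%d head(s) added' % (result - 1)
+        return '%d head(s) removed' % (-result - 1)
+
+    assert describe_push_result(3) == '2 head(s) added'
+    assert describe_push_result(-2) == '1 head(s) removed'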
+
+The encoding of the ``push response`` type varies by transport.
+
+For the SSH version 1 transport, this type is composed of 2 ``string``
+responses: an empty response (``0\n``) followed by the integer result value.
+e.g. ``1\n2``. So the full response might be ``0\n1\n2``.
+
+For the HTTP version 1 transport, the response is a ``string`` type composed
+of an integer result value followed by a newline (``\n``) followed by string
+content holding server output that should be displayed on the client (output
+hooks, etc).
+
+In some cases, the server may respond with a ``bundle2`` bundle. In this
+case, the response type is ``stream``. For the HTTP version 1 transport, the
+response is zlib compressed.
+
+The server may also respond with a generic error type, which contains a string
+indicating the failure.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/wireprotocolrpc.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,740 @@
+**Experimental and under development**
+
+This document describes Mercurial's transport-agnostic remote procedure
+call (RPC) protocol which is used to perform interactions with remote
+servers. This protocol is also referred to as ``hgrpc``.
+
+The protocol has the following high-level features:
+
+* Concurrent request and response support (multiple commands can be issued
+  simultaneously and responses can be streamed simultaneously).
+* Supports half-duplex and full-duplex connections.
+* All data is transmitted within *frames*, which have a well-defined
+  header and encode their length.
+* Side-channels for sending progress updates and printing output. Text
+  output from the remote can be localized locally.
+* Support for simultaneous and long-lived compression streams, even across
+  requests.
+* Uses CBOR for data exchange.
+
+The protocol is not specific to Mercurial and could be used by other
+applications.
+
+High-level Overview
+===================
+
+To operate the protocol, a bi-directional, half-duplex pipe supporting
+ordered sends and receives is required. That is, each peer has one pipe
+for sending data and another for receiving. Full-duplex pipes are also
+supported.
+
+All data is read and written in atomic units called *frames*. These
+are conceptually similar to TCP packets. Higher-level functionality
+is built on the exchange and processing of frames.
+
+All frames are associated with a *stream*. A *stream* provides a
+unidirectional grouping of frames. Streams facilitate two goals:
+content encoding and parallelism. There is a dedicated section on
+streams below.
+
+The protocol is request-response based: the client issues requests to
+the server, which issues replies to those requests. Server-initiated
+messaging is not currently supported, but this specification carves
+out room to implement it.
+
+All frames are associated with a numbered request. Frames can thus
+be logically grouped by their request ID.
+
+Frames
+======
+
+Frames begin with an 8 octet header followed by a variable length
+payload::
+
+    +------------------------------------------------+
+    |                 Length (24)                    |
+    +--------------------------------+---------------+
+    |         Request ID (16)        | Stream ID (8) |
+    +------------------+-------------+---------------+
+    | Stream Flags (8) |
+    +-----------+------+
+    | Type (4)  |
+    +-----------+
+    | Flags (4) |
+    +===========+===================================================|
+    |                     Frame Payload (0...)                    ...
+    +---------------------------------------------------------------+
+
+The length of the frame payload is expressed as an unsigned 24-bit
+little endian integer. Values larger than 65535 MUST NOT be used unless
+given permission by the server as part of the negotiated capabilities
+during the handshake. The frame header is not part of the advertised
+frame length. The payload length is the over-the-wire length. If there
+is content encoding applied to the payload as part of the frame's stream,
+the length is the output of that content encoding, not the input.
+
+The 16-bit ``Request ID`` field denotes the integer request identifier,
+stored as an unsigned little endian integer. Odd numbered requests are
+client-initiated. Even numbered requests are server-initiated. This
+refers to where the *request* was initiated - not where the *frame* was
+initiated, so servers will send frames with odd ``Request ID`` in
+response to client-initiated requests. Implementations are advised to
+start ordering request identifiers at ``1`` and ``0``, increment by
+``2``, and wrap around if all available numbers have been exhausted.
+
+The 8-bit ``Stream ID`` field denotes the stream that the frame is
+associated with. Frames belonging to a stream may have content
+encoding applied and the receiver may need to decode the raw frame
+payload to obtain the original data. Odd numbered IDs are
+client-initiated. Even numbered IDs are server-initiated.
+
+The 8-bit ``Stream Flags`` field defines stream processing semantics.
+See the section on streams below.
+
+The 4-bit ``Type`` field denotes the type of frame being sent.
+
+The 4-bit ``Flags`` field defines special, per-type attributes for
+the frame.
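+
+A minimal Python sketch of decoding this header (the helper name is
+hypothetical; per the layout above, the frame type is read from the
+high nibble of the final octet)::
+
+    import struct
+
+    def parse_frame_header(header):
+        assert len(header) == 8
+        # 24-bit little endian length, zero-padded to 32 bits.
+        length = struct.unpack('<I', header[0:3] + b'\x00')[0]
+        requestid, streamid, streamflags = struct.unpack(
+            '<HBB', header[3:7])
+        frametype = header[7] >> 4
+        frameflags = header[7] & 0x0f
+        return (length, requestid, streamid, streamflags,
+                frametype, frameflags)
+
+    print(parse_frame_header(b'\x0b\x00\x00\x01\x00\x01\x01\x11'))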
+
+The sections below define the frame types and their behavior.
+
+Command Request (``0x01``)
+--------------------------
+
+This frame contains a request to run a command.
+
+The payload consists of a CBOR map defining the command request. The
+bytestring keys of that map are:
+
+name
+   Name of the command that should be executed (bytestring).
+args
+   Map of bytestring keys to various value types containing the named
+   arguments to this command.
+
+   Each command defines its own set of argument names and their expected
+   types.
+
+redirect (optional)
+   (map) Advertises client support for following response *redirects*.
+
+   This map has the following bytestring keys:
+
+   targets
+      (array of bytestring) List of named redirect targets supported by
+      this client. The names come from the targets advertised by the
+      server's *capabilities* message.
+
+   hashes
+      (array of bytestring) List of preferred hashing algorithms that can
+      be used for content integrity verification.
+
+   See the *Content Redirects* section below for more on content redirects.
+
+This frame type MUST ONLY be sent from clients to servers: it is illegal
+for a server to send this frame to a client.
+
+The following flag values are defined for this type:
+
+0x01
+   New command request. When set, this frame represents the beginning
+   of a new request to run a command. The ``Request ID`` attached to this
+   frame MUST NOT be active.
+0x02
+   Command request continuation. When set, this frame is a continuation
+   from a previous command request frame for its ``Request ID``. This
+   flag is set when the CBOR data for a command request does not fit
+   in a single frame.
+0x04
+   Additional frames expected. When set, the command request didn't fit
+   into a single frame and additional CBOR data follows in a subsequent
+   frame.
+0x08
+   Command data frames expected. When set, command data frames are
+   expected to follow the final command request frame for this request.
+
+``0x01`` MUST be set on the initial command request frame for a
+``Request ID``.
+
+``0x01`` or ``0x02`` MUST be set to indicate this frame's role in
+a series of command request frames.
+
+If command data frames are to be sent, ``0x08`` MUST be set on ALL
+command request frames.
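+
+A minimal Python sketch of building such a payload. It uses the
+third-party ``cbor2`` package purely for illustration; Mercurial ships
+its own CBOR support::
+
+    import cbor2
+
+    def command_request_payload(name, args):
+        # Bytestring keys, per the map definition above.
+        return cbor2.dumps({b'name': name, b'args': args})
+
+    payload = command_request_payload(b'heads', {})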
+
+Command Data (``0x02``)
+-----------------------
+
+This frame contains raw data for a command.
+
+Most commands can be executed by specifying arguments. However,
+arguments have an upper bound to their length. Commands that accept
+data beyond this length, or whose length isn't known when the command
+is initially sent, need to stream arbitrary data to the server. This
+frame type facilitates the sending of that data.
+
+The payload of this frame type consists of a stream of raw data to be
+consumed by the command handler on the server. The format of the data
+is command specific.
+
+The following flag values are defined for this type:
+
+0x01
+   Command data continuation. When set, the data for this command
+   continues into a subsequent frame.
+
+0x02
+   End of data. When set, command data has been fully sent to the
+   server. The command has been fully issued and no new data for this
+   command will be sent. The next frame will belong to a new command.
+
+Command Response Data (``0x03``)
+--------------------------------
+
+This frame contains response data to an issued command.
+
+Response data ALWAYS consists of a series of 1 or more CBOR encoded
+values. A CBOR value may use indefinite length encoding, and the
+bytes constituting the value may span several frames.
+
+The following flag values are defined for this type:
+
+0x01
+   Data continuation. When set, an additional frame containing response data
+   will follow.
+0x02
+   End of data. When set, the response data has been fully sent and
+   no additional frames for this response will be sent.
+
+The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
+
+Error Occurred (``0x05``)
+-------------------------
+
+Some kind of error occurred.
+
+There are 3 general kinds of failures that can occur:
+
+* Command error encountered before any response issued
+* Command error encountered after a response was issued
+* Protocol or stream level error
+
+This frame type is used to capture the latter cases. (The general
+command error case is handled by the leading CBOR map in
+``Command Response`` frames.)
+
+The payload of this frame contains a CBOR map detailing the error. That
+map has the following bytestring keys:
+
+type
+   (bytestring) The overall type of error encountered. Can be one of the
+   following values:
+
+   protocol
+      A protocol-level error occurred. This typically means someone
+      is violating the framing protocol semantics and the server is
+      refusing to proceed.
+
+   server
+      A server-level error occurred. This typically indicates some kind of
+      logic error on the server, likely the fault of the server.
+
+   command
+      A command-level error, likely the fault of the client.
+
+message
+   (array of maps) A richly formatted message that is intended for
+   human consumption. See the ``Human Output Side-Channel`` frame
+   section for a description of the format of this data structure.
+
+Human Output Side-Channel (``0x06``)
+------------------------------------
+
+This frame contains a message that is intended to be displayed to
+people. Whereas most frames communicate machine readable data, this
+frame communicates textual data that is intended to be shown to
+humans.
+
+The frame consists of a series of *formatting requests*. Each formatting
+request consists of a formatting string, arguments for that formatting
+string, and labels to apply to that formatting string.
+
+A formatting string is a printf()-like string that allows variable
+substitution within the string. Labels allow the rendered text to be
+*decorated*. Assuming use of the canonical Mercurial code base, a
+formatting string can be the input to the ``i18n._`` function. This
+allows messages emitted from the server to be localized. So even if
+the server has different i18n settings, people could see messages in
+their *native* settings. Similarly, the use of labels allows
+decorations like coloring and underlining to be applied using the
+client's configured rendering settings.
+
+Formatting strings are similar to ``printf()`` strings or how
+Python's ``%`` operator works. The only supported formatting sequences
+are ``%s`` and ``%%``. ``%s`` will be replaced by whatever the string
+at that position resolves to. ``%%`` will be replaced by ``%``. All
+other 2-byte sequences beginning with ``%`` represent a literal
+``%`` followed by that character. However, future versions of the
+wire protocol reserve the right to allow clients to opt in to receiving
+formatting strings with additional formatters, hence why ``%%`` is
+required to represent the literal ``%``.
+
+The frame payload consists of a CBOR array of CBOR maps. Each map
+defines an *atom* of text data to print. Each *atom* has the following
+bytestring keys:
+
+msg
+   (bytestring) The formatting string. Content MUST be ASCII.
+args (optional)
+   Array of bytestrings defining arguments to the formatting string.
+labels (optional)
+   Array of bytestrings defining labels to apply to this atom.
+
+All data to be printed MUST be encoded into a single frame: this frame
+does not support spanning data across multiple frames.
+
+All textual data encoded in these frames is assumed to be line delimited.
+The last atom in the frame SHOULD end with a newline (``\n``). If it
+doesn't, clients MAY add a newline to facilitate immediate printing.
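+
+A minimal Python sketch of rendering such atoms, ignoring labels (a
+real client would map labels to styling)::
+
+    def render_atoms(atoms):
+        out = []
+        for atom in atoms:
+            msg = atom[b'msg'].decode('ascii')
+            args = iter(atom.get(b'args', []))
+            i = 0
+            while i < len(msg):
+                two = msg[i:i + 2]
+                if two == '%s':
+                    out.append(next(args).decode('utf-8', 'replace'))
+                    i += 2
+                elif two == '%%':
+                    out.append('%')
+                    i += 2
+                else:
+                    out.append(msg[i])
+                    i += 1
+        return ''.join(out)
+
+    atoms = [{b'msg': b'pulling from %s\n', b'args': [b'some-repo']}]
+    print(render_atoms(atoms), end='')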
+
+Progress Update (``0x07``)
+--------------------------
+
+This frame holds the progress of an operation on the peer. Consumption
+of these frames allows clients to display progress bars, estimated
+completion times, etc.
+
+Each frame defines the progress of a single operation on the peer. The
+payload consists of a CBOR map with the following bytestring keys:
+
+topic
+   Topic name (string)
+pos
+   Current numeric position within the topic (integer)
+total
+   Total/end numeric position of this topic (unsigned integer)
+label (optional)
+   Unit label (string)
+item (optional)
+   Item name (string)
+
+Progress state is created when a frame is received referencing a
+*topic* that isn't currently tracked. Progress tracking for that
+*topic* is finished when a frame is received reporting the current
+position of that topic as ``-1``.
+
+Multiple *topics* may be active at any given time.
+
+Rendering of progress information is not mandated or governed by this
+specification: implementations MAY render progress information however
+they see fit, including not at all.
+
+The string data describing the topic SHOULD be static strings to
+facilitate receivers localizing that string data. The emitter
+MUST normalize all string data to valid UTF-8 and receivers SHOULD
+validate that received data conforms to UTF-8. The topic name
+SHOULD be ASCII.
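+
+A sketch of receiver-side state tracking under these rules, assuming
+the third-party ``cbor2`` package (the names are illustrative)::
+
+  import cbor2
+
+  progress = {}  # topic -> (pos, total), e.g. for rendering progress bars
+
+  def handleprogressframe(payload):
+      data = cbor2.loads(payload)
+      topic = data[b'topic']
+      if data[b'pos'] == -1:
+          progress.pop(topic, None)  # topic is finished
+      else:
+          progress[topic] = (data[b'pos'], data.get(b'total'))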
+
+Sender Protocol Settings (``0x08``)
+-----------------------------------
+
+This frame type advertises the sender's support for various protocol and
+stream level features. The data advertised in this frame is used to influence
+subsequent behavior of the current frame exchange channel.
+
+The frame payload consists of a CBOR map. It may contain the following
+bytestring keys:
+
+contentencodings
+   (array of bytestring) A list of content encodings supported by the
+   sender, in order of most to least preferred.
+
+   Peers are allowed to encode stream data using any of the listed
+   encodings.
+
+   See the ``Content Encoding Profiles`` section for an enumeration
+   of supported content encodings.
+
+   If not defined, the value is assumed to be a list with the single value
+   ``identity``, meaning only the no-op encoding is supported.
+
+   Senders MAY filter the set of advertised encodings against what they
+   know the receiver supports (e.g. if the receiver advertised encodings
+   via the capabilities descriptor). However, doing so prevents servers
+   from gaining an understanding of the aggregate capabilities of
+   clients, so clients are discouraged from filtering.
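+
+   For example, a sender that prefers zstandard but can fall back to
+   zlib could send a payload corresponding to this map (Python-literal
+   form; the profiles are defined in ``Content Encoding Profiles``)::
+
+     {b'contentencodings': [b'zstd-8mb', b'zlib', b'identity']}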
+
+When this frame is not sent/received, the receiver assumes default values
+for all keys.
+
+If encountered, this frame type MUST be sent before any other frame type
+in a channel.
+
+The following flag values are defined for this frame type:
+
+0x01
+   Data continuation. When set, an additional frame containing more protocol
+   settings immediately follows.
+0x02
+   End of data. When set, the protocol settings data has been completely
+   sent.
+
+The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
+
+Stream Encoding Settings (``0x09``)
+-----------------------------------
+
+This frame type holds information defining the content encoding
+settings for a *stream*.
+
+This frame type is likely consumed by the protocol layer and is not
+passed on to applications.
+
+This frame type MUST ONLY occur on frames having the *Beginning of Stream*
+``Stream Flag`` set.
+
+The payload of this frame defines what content encoding has (possibly)
+been applied to the payloads of subsequent frames in this stream.
+
+The payload consists of a series of CBOR values. The first value is a
+bytestring denoting the content encoding profile of the data in this
+stream. Subsequent CBOR values supplement this simple value in a
+profile-specific manner. See the ``Content Encoding Profiles`` section
+for more.
+
+In the absence of this frame on a stream, it is assumed the stream is
+using the ``identity`` content encoding.
+
+The following flag values are defined for this frame type:
+
+0x01
+   Data continuation. When set, an additional frame containing more encoding
+   settings immediately follows.
+0x02
+   End of data. When set, the encoding settings data has been completely
+   sent.
+
+The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
+
+Stream States and Flags
+=======================
+
+Streams can be in two states: *open* and *closed*. An *open* stream
+is active and frames attached to that stream could arrive at any time.
+A *closed* stream is not active. If a frame attached to a *closed*
+stream arrives, that frame MUST have an appropriate stream flag
+set indicating beginning of stream. All streams are in the *closed*
+state by default.
+
+The ``Stream Flags`` field denotes a set of bit flags for defining
+the relationship of this frame within a stream. The following flags
+are defined:
+
+0x01
+   Beginning of stream. The first frame in the stream MUST set this
+   flag. When received, the ``Stream ID`` this frame is attached to
+   becomes ``open``.
+
+0x02
+   End of stream. The last frame in a stream MUST set this flag. When
+   received, the ``Stream ID`` this frame is attached to becomes
+   ``closed``. Any content encoding context associated with this stream
+   can be destroyed after processing the payload of this frame.
+
+0x04
+   Apply content encoding. When set, any content encoding settings
+   defined by the stream should be applied when attempting to read
+   the frame. When not set, the frame payload isn't encoded.
+
+TODO consider making stream opening and closing communicated via
+explicit frame types (e.g. a "stream state change" frame) rather than
+flags on all frames. This would make stream state changes more explicit,
+as they could only occur on specific frame types.
+
+Streams
+=======
+
+Streams - along with ``Request IDs`` - facilitate grouping of frames.
+But the purpose of each is quite different and the groupings they
+constitute are independent.
+
+A ``Request ID`` is essentially a tag. It tells you which logical
+request a frame is associated with.
+
+A *stream* is a sequence of frames grouped for the express purpose
+of applying a stateful encoding or for denoting sub-groups of frames.
+
+Unlike ``Request IDs``, which span the request and response, a stream
+is unidirectional, and the stream IDs used by the client and by the
+server are independent of each other.
+
+There is no strict hierarchical relationship between ``Request IDs``
+and *streams*. A stream can contain frames having multiple
+``Request IDs``. Frames belonging to the same ``Request ID`` can
+span multiple streams.
+
+One goal of streams is to facilitate content encoding. A stream can
+define an encoding to be applied to frame payloads. For example, the
+payload transmitted over the wire may contain output from a
+zstandard compression operation and the receiving end may decompress
+that payload to obtain the original data.
+
+The other goal of streams is to facilitate concurrent execution. For
+example, a server could spawn 4 threads to service a request that can
+be easily parallelized. Each of those 4 threads could write into its
+own stream. Those streams could then in turn be delivered to 4 threads
+on the receiving end, with each thread consuming its stream in near
+isolation. The *main* thread on both ends merely does I/O and
+encodes/decodes frame headers: the bulk of the work is done by worker
+threads.
+
+In addition, since content encoding is defined per stream, each
+*worker thread* could perform potentially CPU bound work concurrently
+with other threads. This approach of applying encoding at the
+sub-protocol / stream level eliminates a potential resource constraint
+on the protocol stream as a whole (it is common for the throughput of
+a compression engine to be smaller than the throughput of a network).
+
+Having multiple streams - each with their own encoding settings - also
+facilitates the use of advanced data compression techniques. For
+example, a transmitter could observe that it is generating data faster
+or slower than the receiving end is consuming it and adjust its
+compression settings to trade CPU for compression ratio accordingly.
+
+While streams can define a content encoding, not all frames within
+that stream must use that content encoding. This can be useful when
+some data is served from caches while other data is derived dynamically.
+A cache could hold pre-compressed data so the server doesn't have to
+recompress it. The ability to pick and choose which frames are
+compressed allows servers to easily send data to the wire without
+involving potentially expensive encoding overhead.
+
+Content Encoding Profiles
+=========================
+
+Streams can have named content encoding *profiles* associated with
+them. A profile defines a shared understanding of content encoding
+settings and behavior.
+
+Profiles are described in the following sections.
+
+identity
+--------
+
+The ``identity`` profile is a no-op encoding: the encoded bytes are
+exactly the input bytes.
+
+This profile MUST be supported by all peers.
+
+In the absence of an identified profile, the ``identity`` profile is
+assumed.
+
+zstd-8mb
+--------
+
+Zstandard encoding (RFC 8478). Zstandard is a fast and effective lossless
+compression format.
+
+This profile allows decompressor window sizes of up to 8 MB.
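+
+A sketch of how a receiver might enforce that limit, assuming the
+third-party ``zstandard`` Python package::
+
+  import zstandard
+
+  def decodezstd8mb(chunks):
+      # Cap the window at 8 MB: frames requiring a larger window
+      # will be rejected at decode time.
+      dctx = zstandard.ZstdDecompressor(max_window_size=8 * 1024 * 1024)
+      dobj = dctx.decompressobj()
+      return b''.join(dobj.decompress(chunk) for chunk in chunks)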
+
+zlib
+----
+
+zlib compressed data (RFC 1950). zlib is a widely-used and supported
+lossless compression format.
+
+It isn't as fast as zstandard and it is recommended to use zstandard instead,
+if possible.
+
+Command Protocol
+================
+
+A client can request that a remote run a command by sending it
+frames defining that command. This logical stream is composed of
+1 or more ``Command Request`` frames and 0 or more ``Command Data``
+frames.
+
+All frames composing a single command request MUST be associated with
+the same ``Request ID``.
+
+Clients MAY send additional command requests without waiting on the
+response to a previous command request. If they do so, they MUST ensure
+that the ``Request ID`` field of outbound frames does not conflict
+with that of an active ``Request ID`` whose response has not yet been
+fully received.
+
+Servers MAY respond to commands in a different order than they were
+sent over the wire. Clients MUST be prepared to deal with this. Servers
+also MAY start executing commands in a different order than they were
+received, or MAY execute multiple commands concurrently.
+
+If there is a dependency between commands or a race condition between
+commands executing (e.g. a read-only command that depends on the results
+of a command that mutates the repository), then clients MUST NOT send
+frames issuing a command until a response to all dependent commands has
+been received.
+TODO think about whether we should express dependencies between commands
+to avoid roundtrip latency.
+
+A command is defined by a command name, 0 or more command arguments,
+and optional command data.
+
+Arguments are the recommended mechanism for transferring fixed sets of
+parameters to a command. Data is appropriate for transferring variable
+data. Thinking in terms of HTTP, arguments would be headers and data
+would be the message body.
+
+It is recommended for servers to delay the dispatch of a command
+until all arguments have been received. Servers MAY impose limits on
+the maximum argument size.
+TODO define failure mechanism.
+
+Servers MAY dispatch to commands immediately once argument data
+is available or delay until command data is received in full.
+
+Once a ``Command Request`` frame is sent, a client must be prepared to
+receive any of the following frames associated with that request:
+``Command Response``, ``Error Response``, ``Human Output Side-Channel``,
+``Progress Update``.
+
+The *main* response for a command will be in ``Command Response`` frames.
+The payloads of these frames consist of 1 or more CBOR encoded values.
+The first CBOR value on the first ``Command Response`` frame is special
+and denotes the overall status of the command. This CBOR map contains
+the following bytestring keys:
+
+status
+   (bytestring) A well-defined message containing the overall status of
+   this command request. The following values are defined:
+
+   ok
+      The command was received successfully and its response follows.
+   error
+      There was an error processing the command. More details about the
+      error are encoded in the ``error`` key.
+   redirect
+      The response for this command is available elsewhere. Details on
+      where are in the ``location`` key.
+
+error (optional)
+   A map containing information about an encountered error. The map has the
+   following keys:
+
+   message
+      (array of maps) A message describing the error. The message uses the
+      same format as those in the ``Human Output Side-Channel`` frame.
+
+location (optional)
+   (map) Presence indicates that a *content redirect* has occurred. The map
+   provides the external location of the content.
+
+   This map contains the following bytestring keys:
+
+   url
+      (bytestring) URL from which this content may be requested.
+
+   mediatype
+      (bytestring) The media type for the fetched content. e.g.
+      ``application/mercurial-*``.
+
+      In some transports, this value is also advertised by the transport.
+      e.g. as the ``Content-Type`` HTTP header.
+
+   size (optional)
+      (unsigned integer) Total size of remote object in bytes. This is
+      the raw size of the entity that will be fetched, minus any
+      non-Mercurial protocol encoding (e.g. HTTP content or transfer
+      encoding).
+
+   fullhashes (optional)
+      (array of arrays) Content hashes for the entire payload. Each entry
+      is an array of bytestrings containing the hash name and the hash value.
+
+   fullhashseed (optional)
+      (bytestring) Optional seed value to feed into hasher for full content
+      hash verification.
+
+   serverdercerts (optional)
+      (array of bytestring) DER encoded x509 certificates for the server. When
+      defined, clients MAY validate that the x509 certificate on the target
+      server exactly matches the certificate used here.
+
+   servercadercerts (optional)
+      (array of bytestring) DER encoded x509 certificates for the certificate
+      authority of the target server. When defined, clients MAY validate that
+      the x509 certificate on the target server was signed by a CA
+      certificate in this set.
+
+   # TODO support for giving client an x509 certificate pair to be used as a
+   # client certificate.
+
+   # TODO support common authentication mechanisms (e.g. HTTP basic/digest
+   # auth).
+
+   # TODO support custom authentication mechanisms. This likely requires
+   # server to advertise required auth mechanism so client can filter.
+
+   # TODO support chained hashes. e.g. hash for each 1MB segment so client
+   # can iteratively validate data without having to consume all of it first.
+
+TODO formalize when error frames can be seen and how errors can be
+recognized midway through a command response.
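+
+As a rough sketch, a client might decode a reassembled ``Command
+Response`` payload like this, assuming the third-party ``cbor2``
+package (error handling is simplified)::
+
+  import io
+
+  import cbor2
+
+  def readresponse(payload):
+      fh = io.BytesIO(payload)
+      overall = cbor2.load(fh)  # first value: overall status map
+      if overall[b'status'] == b'error':
+          raise RuntimeError(overall[b'error'][b'message'])
+      values = []
+      while fh.tell() < len(payload):
+          values.append(cbor2.load(fh))
+      return overall, values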
+
+Content Redirects
+=================
+
+Servers have the ability to respond to ANY command request with a
+*redirect* to another location. Such a response is referred to as a *redirect
+response*. (This feature is conceptually similar to HTTP redirects, but is
+more powerful.)
+
+A *redirect response* MUST ONLY be issued if the client advertises
+support for a redirect *target* and MUST NOT be issued otherwise.
+
+Clients advertise support for *redirect responses* after looking at the server's
+*capabilities* data, which is fetched during initial server connection
+handshake. The server's capabilities data advertises named *targets* for
+potential redirects.
+
+Each target is described by a protocol name, connection and protocol features,
+etc. The server also advertises target-agnostic redirect settings, such as
+which hash algorithms are supported for content integrity checking. (See
+the documentation for the *capabilities* command for more.)
+
+Clients examine the set of advertised redirect targets for compatibility.
+When sending a command request, the client advertises the set of redirect
+target names it is willing to follow, along with some other settings influencing
+behavior.
+
+For example, say the server is advertising a ``cdn`` redirect target that
+requires SNI and TLS 1.2. If the client supports those features, it will
+send command requests stating that the ``cdn`` target is acceptable to use.
+But if the client doesn't support SNI or TLS 1.2 (or maybe it encountered an
+error using this target from a previous request), then it omits this target
+name.
+
+If the client advertises support for a redirect target, the server MAY
+substitute the normal, inline response data for a *redirect response* -
+one where the initial CBOR map has a ``status`` key with value ``redirect``.
+
+The *redirect response* at a minimum advertises the URL where the response
+can be retrieved.
+
+The *redirect response* MAY also advertise additional details about that
+content and how to retrieve it. Notably, the response may contain the
+x509 public certificates for the server being redirected to or the
+certificate authority that signed that server's certificate. Unless the
+client has existing settings that offer stronger trust validation than what
+the server advertises, the client SHOULD use the server-provided certificates
+when validating the connection to the remote server in place of any default
+connection verification checks. This is because certificates coming from
+the server SHOULD establish a stronger chain of trust than what the default
+certificate validation mechanism in most environments provides. (By default,
+certificate validation ensures the signer of the cert chains up to a set of
+trusted root certificates. If an explicit certificate or CA certificate
+is presented, that greatly reduces the set of certificates that will be
+recognized as valid, thus reducing the potential for a "bad" certificate
+to be used and trusted.)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/wireprotocolv2.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,724 @@
+**Experimental and under active development**
+
+This section documents the wire protocol commands exposed to transports
+using the frame-based protocol. The set of commands exposed through
+these transports is distinct from the set of commands exposed to legacy
+transports.
+
+The frame-based protocol uses CBOR to encode command execution requests.
+All command arguments must be mapped to a specific or set of CBOR data
+types.
+
+The response to many commands is also CBOR. There is no common response
+format: each command defines its own response format.
+
+TODOs
+=====
+
+* Add "node namespace" support to each command. In order to support
+  SHA-1 hash transition, we want servers to be able to expose different
+  "node namespaces" for the same data. Every command operating on nodes
+  should specify which "node namespace" it is operating on and responses
+  should encode the "node namespace" accordingly.
+
+Commands
+========
+
+The sections below detail all commands available to wire protocol version
+2.
+
+branchmap
+---------
+
+Obtain heads in named branches.
+
+Receives no arguments.
+
+The response is a map with bytestring keys defining the branch name.
+Values are arrays of bytestrings defining raw changeset nodes.
+
+capabilities
+------------
+
+Obtain the server's capabilities.
+
+Receives no arguments.
+
+This command is typically called only as part of the handshake during
+initial connection establishment.
+
+The response is a map with bytestring keys defining server information.
+
+The defined keys are:
+
+commands
+   A map defining available wire protocol commands on this server.
+
+   Keys in the map are the names of commands that can be invoked. Values
+   are maps defining information about that command. The bytestring keys
+   are:
+
+      args
+         (map) Describes arguments accepted by the command.
+
+         Keys are bytestrings denoting the argument name.
+
+         Values are maps describing the argument. The map has the following
+         bytestring keys:
+
+         default
+            (varied) The default value for this argument if not specified. Only
+            present if ``required`` is not true.
+
+         required
+            (boolean) Whether the argument must be specified. Failure to send
+            required arguments will result in an error executing the command.
+
+         type
+            (bytestring) The type of the argument. e.g. ``bytes`` or ``bool``.
+
+         validvalues
+            (set) Values that are recognized for this argument. Some arguments
+            only allow a fixed set of values to be specified. These arguments
+            may advertise that set in this key. If this set is advertised and
+            a value not in this set is specified, the command should result
+            in error.
+
+      permissions
+         An array of permissions required to execute this command.
+
+      *
+         (various) Individual commands may define extra keys that supplement
+         generic command metadata. See the command definition for more.
+
+framingmediatypes
+   An array of bytestrings defining the supported framing protocol
+   media types. Servers will not accept media types not in this list.
+
+pathfilterprefixes
+   (set of bytestring) Matcher prefixes that are recognized when performing
+   path filtering. Specifying a path filter whose type/prefix does not
+   match one in this set will likely be rejected by the server.
+
+rawrepoformats
+   An array of storage formats the repository is using. This set of
+   requirements can be used to determine whether a client can read a
+   *raw* copy of file data available.
+
+redirect
+   A map declaring potential *content redirects* that may be used by this
+   server. Contains the following bytestring keys:
+
+   targets
+      (array of maps) Potential redirect targets. Values are maps describing
+      this target in more detail. Each map has the following bytestring keys:
+
+      name
+         (bytestring) Identifier for this target. The identifier will be used
+         by clients to uniquely identify this target.
+
+      protocol
+         (bytestring) High-level network protocol. Values can be
+         ``http``, ``https``, ``ssh``, etc.
+
+      uris
+          (array of bytestrings) Representative URIs for this target.
+
+      snirequired (optional)
+          (boolean) Indicates whether Server Name Indication is required
+          to use this target. Defaults to False.
+
+      tlsversions (optional)
+          (array of bytestring) Indicates which TLS versions are supported by
+          this target. Values are ``1.1``, ``1.2``, ``1.3``, etc.
+
+   hashes
+      (array of bytestring) Indicates support for hashing algorithms that are
+      used to ensure content integrity. Values include ``sha1``, ``sha256``,
+      etc.
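+
+A sketch of client-side target selection against this data, assuming
+the capabilities response has been decoded into nested Python objects
+(the function and parameter names are illustrative)::
+
+  def usabletargets(caps, tlsversions=frozenset({b'1.1', b'1.2'}),
+                    havesni=True):
+      # Collect the names of redirect targets this client can follow.
+      names = []
+      for target in caps.get(b'redirect', {}).get(b'targets', []):
+          if target.get(b'snirequired', False) and not havesni:
+              continue
+          wanted = target.get(b'tlsversions')
+          if wanted is not None and not set(wanted) & tlsversions:
+              continue
+          names.append(target[b'name'])
+      return names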
+
+changesetdata
+-------------
+
+Obtain various data related to changesets.
+
+The command accepts the following arguments:
+
+revisions
+   (array of maps) Specifies revisions whose data is being requested. Each
+   value in the array is a map describing revisions. See the
+   *Revisions Specifiers* section below for the format of this map.
+
+   Data will be sent for the union of all revisions resolved by all
+   revision specifiers.
+
+   Only revision specifiers operating on changeset revisions are allowed.
+
+fields
+   (set of bytestring) Which data associated with changelog revisions to
+   fetch. The following values are recognized:
+
+   bookmarks
+      Bookmarks associated with a revision.
+
+   parents
+      Parent revisions.
+
+   phase
+      The phase state of a revision.
+
+   revision
+      The raw, revision data for the changelog entry. The hash of this data
+      will match the revision's node value.
+
+The response bytestream starts with a CBOR map describing the data that follows.
+This map has the following bytestring keys:
+
+totalitems
+   (unsigned integer) Total number of changelog revisions whose data is being
+   transferred. This maps to the set of revisions in the requested node
+   range, not the total number of records that follow (see below for why).
+
+Following the map header is a series of 0 or more CBOR values. If values
+are present, the first value will always be a map describing a single changeset
+revision.
+
+If the ``fieldsfollowing`` key is present, the map will immediately be followed
+by N CBOR bytestring values, where N is the number of elements in
+``fieldsfollowing``. Each bytestring value corresponds to a field denoted
+by ``fieldsfollowing``.
+
+Following the optional bytestring field values is the next revision descriptor
+map, or end of stream.
+
+Each revision descriptor map has the following bytestring keys:
+
+node
+   (bytestring) The node value for this revision. This is the SHA-1 hash of
+   the raw revision data.
+
+bookmarks (optional)
+   (array of bytestrings) Bookmarks attached to this revision. Only present
+   if ``bookmarks`` data is being requested and the revision has bookmarks
+   attached.
+
+fieldsfollowing (optional)
+   (array of 2-array) Denotes what fields immediately follow this map. Each
+   value is an array with 2 elements: the bytestring field name and an unsigned
+   integer describing the length of the data, in bytes.
+
+   If this key isn't present, no special fields will follow this map.
+
+   The following fields may be present:
+
+   revision
+      Raw, revision data for the changelog entry. Contains a serialized form
+      of the changeset data, including the author, date, commit message, set
+      of changed files, manifest node, and other metadata.
+
+      Only present if the ``revision`` field was requested.
+
+parents (optional)
+   (array of bytestrings) The nodes representing the parent revisions of this
+   revision. Only present if ``parents`` data is being requested.
+
+phase (optional)
+   (bytestring) The phase that a revision is in. Recognized values are
+   ``secret``, ``draft``, and ``public``. Only present if ``phase`` data
+   is being requested.
+
+The set of changeset revisions emitted may not match the exact set of
+changesets requested. Furthermore, the set of keys present on each
+map may vary. This is to facilitate emitting changeset updates as well
+as new revisions.
+
+For example, if the request wants ``phase`` and ``revision`` data,
+the response may contain entries for each changeset in the common nodes
+set with the ``phase`` key and without the ``revision`` key in order
+to reflect a phase-only update.
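+
+A sketch of consuming this bytestream, assuming the third-party
+``cbor2`` package and a fully reassembled payload (real consumers
+process frames incrementally)::
+
+  import io
+
+  import cbor2
+
+  def readrevisions(payload):
+      fh = io.BytesIO(payload)
+      header = cbor2.load(fh)  # map containing b'totalitems'
+      while fh.tell() < len(payload):
+          descriptor = cbor2.load(fh)
+          fields = {}
+          for name, size in descriptor.get(b'fieldsfollowing', []):
+              # Each field arrives as a CBOR bytestring value whose
+              # decoded length matches the advertised size.
+              fields[name] = cbor2.load(fh)
+          yield descriptor, fields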
+
+TODO support different revision selection mechanisms (e.g. non-public, specific
+revisions)
+TODO support different hash "namespaces" for revisions (e.g. sha-1 versus other)
+TODO support emitting obsolescence data
+TODO support filtering based on relevant paths (narrow clone)
+TODO support hgtagsfnodes cache / tags data
+TODO support branch heads cache
+TODO consider unifying the query mechanism. e.g. as an array of "query descriptors"
+rather than a set of top-level arguments that have semantics when combined.
+
+filedata
+--------
+
+Obtain various data related to an individual tracked file.
+
+The command accepts the following arguments:
+
+fields
+   (set of bytestring) Which data associated with a file to fetch.
+   The following values are recognized:
+
+   linknode
+      The changeset node introducing this revision.
+
+   parents
+      Parent nodes for the revision.
+
+   revision
+      The raw revision data for a file.
+
+haveparents
+   (bool) Whether the client has the parent revisions of all requested
+   nodes. If set, the server may emit revision data as deltas against
+   any parent revision. If not set, the server MUST only emit deltas for
+   revisions previously emitted by this command.
+
+   False is assumed in the absence of any value.
+
+nodes
+   (array of bytestrings) File nodes whose data to retrieve.
+
+path
+   (bytestring) Path of the tracked file whose data to retrieve.
+
+TODO allow specifying revisions via alternate means (such as from
+changeset revisions or ranges)
+
+The response bytestream starts with a CBOR map describing the data that
+follows. It has the following bytestring keys:
+
+totalitems
+   (unsigned integer) Total number of file revisions whose data is
+   being returned.
+
+Following the map header is a series of 0 or more CBOR values. If values
+are present, the first value will always be a map describing a single file
+revision.
+
+If the ``fieldsfollowing`` key is present, the map will immediately be followed
+by N CBOR bytestring values, where N is the number of elements in
+``fieldsfollowing``. Each bytestring value corresponds to a field denoted
+by ``fieldsfollowing``.
+
+Following the optional bytestring field values is the next revision descriptor
+map, or end of stream.
+
+Each revision descriptor map has the following bytestring keys:
+
+node
+   (bytestring) The node of the file revision whose data is represented.
+
+deltabasenode
+   (bytestring) Node of the file revision the following delta is against.
+
+   Only present if the ``revision`` field is requested and delta data
+   follows this map.
+
+fieldsfollowing
+   (array of 2-array) Denotes extra bytestring fields that follow this map.
+   See the documentation for ``changesetdata`` for semantics.
+
+   The following named fields may be present:
+
+   ``delta``
+      The delta data to use to construct the fulltext revision.
+
+      Only present if the ``revision`` field is requested and a delta is
+      being emitted. The ``deltabasenode`` top-level key will also be
+      present if this field is being emitted.
+
+   ``revision``
+      The fulltext revision data for this file revision. Only present if the
+      ``revision`` field is requested and a fulltext revision is being emitted.
+
+parents
+   (array of bytestring) The nodes of the parents of this file revision.
+
+   Only present if the ``parents`` field is requested.
+
+When ``revision`` data is requested, the server chooses to emit either fulltext
+revision data or a delta. What the server decides can be inferred by looking
+for the presence of the ``delta`` or ``revision`` keys in the
+``fieldsfollowing`` array.
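+
+A sketch of resolving either form into a fulltext; ``applydelta`` is a
+hypothetical stand-in for a real delta-application routine, and
+``store`` for previously received fulltexts keyed by node::
+
+  def resolverevision(descriptor, fields, store, applydelta):
+      if b'revision' in fields:
+          return fields[b'revision']  # fulltext was sent directly
+      base = store[descriptor[b'deltabasenode']]
+      return applydelta(base, fields[b'delta'])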
+
+filesdata
+---------
+
+Obtain various data related to multiple tracked files for specific changesets.
+
+This command is similar to ``filedata`` with the main difference being that
+individual requests operate on multiple file paths. This allows clients to
+request data for multiple paths by issuing a single command.
+
+The command accepts the following arguments:
+
+fields
+   (set of bytestring) Which data associated with a file to fetch.
+   The following values are recognized:
+
+   linknode
+      The changeset node introducing this revision.
+
+   parents
+      Parent nodes for the revision.
+
+   revision
+      The raw revision data for a file.
+
+haveparents
+   (bool) Whether the client has the parent revisions of all requested
+   nodes.
+
+pathfilter
+   (map) Defines a filter that determines what file paths are relevant.
+
+   See the *Path Filters* section for more.
+
+   If the argument is omitted, it is assumed that all paths are relevant.
+
+revisions
+   (array of maps) Specifies revisions whose data is being requested. Each value
+   in the array is a map describing revisions. See the *Revisions Specifiers*
+   section below for the format of this map.
+
+   Data will be sent for the union of all revisions resolved by all revision
+   specifiers.
+
+   Only revision specifiers operating on changeset revisions are allowed.
+
+The response bytestream starts with a CBOR map describing the data that
+follows. This map has the following bytestring keys:
+
+totalpaths
+   (unsigned integer) Total number of paths whose data is being transferred.
+
+totalitems
+   (unsigned integer) Total number of file revisions whose data is being
+   transferred.
+
+Following the map header are 0 or more sequences of CBOR values. Each sequence
+represents data for a specific tracked path. Each sequence begins with a CBOR
+map describing the file data that follows. Following that map are N CBOR values
+describing file revision data. The format of this data is identical to that
+returned by the ``filedata`` command.
+
+Each sequence's map header has the following bytestring keys:
+
+path
+   (bytestring) The tracked file path whose data follows.
+
+totalitems
+   (unsigned integer) Total number of file revisions whose data is being
+   transferred.
+
+The ``haveparents`` argument has significant implications on the data
+transferred.
+
+When ``haveparents`` is true, the command MAY only emit data for file
+revisions introduced by the set of changeset revisions whose data is being
+requested. In other words, the command may assume that all file revisions
+for all relevant paths for ancestors of the requested changeset revisions
+are present on the receiver.
+
+When ``haveparents`` is false, the command MUST assume that the receiver
+has no file revisions data. This means that all referenced file revisions
+in the queried set of changeset revisions will be sent.
+
+TODO we want a more complicated mechanism for the client to specify which
+ancestor revisions are known. This is needed so intelligent deltas can be
+emitted and so updated linknodes can be sent if the client needs to adjust
+its linknodes for existing file nodes to older changeset revisions.
+TODO we may want to make linknodes an array so multiple changesets can be
+marked as introducing a file revision, since this can occur with e.g. hidden
+changesets.
+
+heads
+-----
+
+Obtain DAG heads in the repository.
+
+The command accepts the following arguments:
+
+publiconly (optional)
+   (boolean) If set, operate on the DAG for public phase changesets only.
+   Non-public (i.e. draft) phase DAG heads will not be returned.
+
+The response is a CBOR array of bytestrings defining changeset nodes
+of DAG heads. The array can be empty if the repository is empty or no
+changesets satisfied the request.
+
+TODO consider exposing phase of heads in response
+
+known
+-----
+
+Determine whether a series of changeset nodes is known to the server.
+
+The command accepts the following arguments:
+
+nodes
+   (array of bytestrings) List of changeset nodes whose presence to
+   query.
+
+The response is a bytestring where each byte contains a 0 or 1 for the
+corresponding requested node at the same index.
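+
+For example, a client might map the response back to nodes like this
+(a sketch; assumes the bytes are ASCII ``0``/``1`` as in the legacy
+protocol)::
+
+  def parseknown(nodes, response):
+      return {node: response[i:i + 1] == b'1'
+              for i, node in enumerate(nodes)}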
+
+TODO use a bit array for even more compact response
+
+listkeys
+--------
+
+List values in a specified ``pushkey`` namespace.
+
+The command receives the following arguments:
+
+namespace
+   (bytestring) Pushkey namespace to query.
+
+The response is a map with bytestring keys and values.
+
+TODO consider using binary to represent nodes in certain pushkey namespaces.
+
+lookup
+------
+
+Try to resolve a value to a changeset revision.
+
+Unlike ``known``, which operates on changeset nodes, ``lookup`` operates on
+node fragments and other names that a user may use.
+
+The command receives the following arguments:
+
+key
+   (bytestring) Value to try to resolve.
+
+On success, returns a bytestring containing the resolved node.
+
+manifestdata
+------------
+
+Obtain various data related to manifests (which are lists of files in
+a revision).
+
+The command accepts the following arguments:
+
+fields
+   (set of bytestring) Which data associated with manifests to fetch.
+   The following values are recognized:
+
+   parents
+      Parent nodes for the manifest.
+
+   revision
+      The raw revision data for the manifest.
+
+haveparents
+   (bool) Whether the client has the parent revisions of all requested
+   nodes. If set, the server may emit revision data as deltas against
+   any parent revision. If not set, the server MUST only emit deltas for
+   revisions previously emitted by this command.
+
+   False is assumed in the absence of any value.
+
+nodes
+   (array of bytestring) Manifest nodes whose data to retrieve.
+
+tree
+   (bytestring) Path to manifest to retrieve. The empty bytestring represents
+   the root manifest. All other values represent directories/trees within
+   the repository.
+
+TODO allow specifying revisions via alternate means (such as from changeset
+revisions or ranges)
+TODO consider recursive expansion of manifests (with path filtering for
+narrow use cases)
+
+The response bytestream starts with a CBOR map describing the data that
+follows. It has the following bytestring keys:
+
+totalitems
+   (unsigned integer) Total number of manifest revisions whose data is
+   being returned.
+
+Following the map header is a series of 0 or more CBOR values. If values
+are present, the first value will always be a map describing a single manifest
+revision.
+
+If the ``fieldsfollowing`` key is present, the map will immediately be followed
+by N CBOR bytestring values, where N is the number of elements in
+``fieldsfollowing``. Each bytestring value corresponds to a field denoted
+by ``fieldsfollowing``.
+
+Following the optional bytestring field values is the next revision descriptor
+map, or end of stream.
+
+Each revision descriptor map has the following bytestring keys:
+
+node
+   (bytestring) The node of the manifest revision whose data is represented.
+
+deltabasenode
+   (bytestring) The node that the delta representation of this revision is
+   computed against. Only present if the ``revision`` field is requested and
+   a delta is being emitted.
+
+fieldsfollowing
+   (array of 2-array) Denotes extra bytestring fields that follow this map.
+   See the documentation for ``changesetdata`` for semantics.
+
+   The following named fields may be present:
+
+   ``delta``
+      The delta data to use to construct the fulltext revision.
+
+      Only present if the ``revision`` field is requested and a delta is
+      being emitted. The ``deltabasenode`` top-level key will also be
+      present if this field is being emitted.
+
+   ``revision``
+      The fulltext revision data for this manifest. Only present if the
+      ``revision`` field is requested and a fulltext revision is being emitted.
+
+parents
+   (array of bytestring) The nodes of the parents of this manifest revision.
+   Only present if the ``parents`` field is requested.
+
+When ``revision`` data is requested, the server chooses to emit either fulltext
+revision data or a delta. What the server decides can be inferred by looking
+for the presence of ``delta`` or ``revision`` in the ``fieldsfollowing`` array.
+
+Servers MAY advertise the following extra fields in the capabilities
+descriptor for this command:
+
+recommendedbatchsize
+   (unsigned integer) Number of revisions the server recommends as a batch
+   query size. If defined, clients needing to issue multiple ``manifestdata``
+   commands to obtain needed data SHOULD construct their commands to have
+   this many revisions per request.
+
+pushkey
+-------
+
+Set a value using the ``pushkey`` protocol.
+
+The command receives the following arguments:
+
+namespace
+   (bytestring) Pushkey namespace to operate on.
+key
+   (bytestring) The pushkey key to set.
+old
+   (bytestring) Old value for this key.
+new
+   (bytestring) New value for this key.
+
+TODO consider using binary to represent nodes in certain pushkey namespaces.
+TODO better define response type and meaning.
+
+rawstorefiledata
+----------------
+
+Allows retrieving raw files used to store repository data.
+
+The command accepts the following arguments:
+
+files
+   (array of bytestring) Describes the files that should be retrieved.
+
+   The meaning of values in this array is dependent on the storage backend used
+   by the server.
+
+The response bytestream starts with a CBOR map describing the data that follows.
+This map has the following bytestring keys:
+
+filecount
+   (unsigned integer) Total number of files whose data is being transferred.
+
+totalsize
+   (unsigned integer) Total size in bytes of the file data that will be
+   transferred. This is the on-disk size, not the size on the wire.
+
+Following the map header are N file segments. Each file segment consists of a
+CBOR map followed by an indefinite length bytestring. Each map has the following
+bytestring keys:
+
+location
+   (bytestring) Denotes the location in the repository where the file should be
+   written. Values map to vfs instances to use for writing.
+
+path
+   (bytestring) Path of file being transferred. Path is the raw store
+   path and can be any sequence of bytes that can be tracked in a Mercurial
+   manifest.
+
+size
+   (unsigned integer) Size of file data. This will be the final written
+   file size. The total size of the data that follows the CBOR map
+   will be greater due to encoding overhead of CBOR.
+
+TODO this command is woefully incomplete. If we are to move forward with a
+stream clone analog, it needs a lot more metadata around how to describe what
+files are available to retrieve, and other semantics.
+
+Revision Specifiers
+===================
+
+A *revision specifier* is a map that evaluates to a set of revisions.
+
+A *revision specifier* has a ``type`` key that defines the revision
+selection type to perform. Other keys in the map are used in a
+type-specific manner.
+
+The following types are defined:
+
+changesetexplicit
+   An explicit set of enumerated changeset revisions.
+
+   The ``nodes`` key MUST contain an array of full binary nodes, expressed
+   as bytestrings.
+
+changesetexplicitdepth
+   Like ``changesetexplicit``, but contains a ``depth`` key defining the
+   unsigned integer number of ancestor revisions to also resolve. For each
+   value in ``nodes``, DAG ancestors will be walked until up to N total
+   revisions from that ancestry walk are present in the final resolved set.
+
+changesetdagrange
+   Defines revisions via a DAG range of changesets on the changelog.
+
+   The ``roots`` key MUST contain an array of full, binary node values
+   representing the *root* revisions.
+
+   The ``heads`` key MUST contain an array of full, binary node values
+   representing the *head* revisions.
+
+   The DAG range between ``roots`` and ``heads`` will be resolved and all
+   revisions between will be used. Nodes in ``roots`` are not part of the
+   resolved set. Nodes in ``heads`` are. The ``roots`` array may be empty.
+   The ``heads`` array MUST be defined.
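+
+For example, a ``changesetdagrange`` specifier covering everything
+between a common node and a head could correspond to this map
+(Python-literal form; the node values are placeholders)::
+
+  {
+    b'type': b'changesetdagrange',
+    b'roots': [b'<20-byte binary node>'],
+    b'heads': [b'<20-byte binary node>'],
+  }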
+
+Path Filters
+============
+
+Various commands accept a *path filter* argument that defines the set of file
+paths relevant to the request.
+
+A *path filter* is defined as a map with the bytestring keys ``include`` and
+``exclude``. Each is an array of bytestring values. Each value defines a pattern
+rule (see :hg:`help patterns`) that is used to match file paths.
+
+A path matches the path filter if it is matched by a rule in the ``include``
+set but doesn't match a rule in the ``exclude`` set. In other words, a path
+matcher takes the union of all ``include`` patterns and then subtracts the
+union of all ``exclude`` patterns.
+
+Patterns MUST be prefixed with their pattern type. Only the following pattern
+types are allowed: ``path:``, ``rootfilesin:``.
+
+If the ``include`` key is omitted, it is assumed that all paths are
+relevant. The patterns from ``exclude`` will still be used, if defined.
+
+An example value is ``path:tests/foo``, which would match a file named
+``tests/foo`` or a directory ``tests/foo`` and all files under it.
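+
+Expressed as a complete *path filter* map, a request for everything
+under ``tests`` except ``tests/foo`` could look like this
+(Python-literal form)::
+
+  {
+    b'include': [b'path:tests'],
+    b'exclude': [b'path:tests/foo'],
+  }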
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/merge-tools.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,109 @@
+To merge files Mercurial uses merge tools.
+
+A merge tool combines two different versions of a file into a merged
+file. Merge tools are given the two files and the greatest common
+ancestor of the two file versions, so they can determine the changes
+made on both branches.
+
+Merge tools are used both for :hg:`resolve`, :hg:`merge`, :hg:`update`,
+:hg:`backout` and in several extensions.
+
+Usually, the merge tool tries to automatically reconcile the files by
+combining all non-overlapping changes that occurred separately in
+the two different evolutions of the same initial base file. Furthermore, some
+interactive merge programs make it easier to manually resolve
+conflicting merges, either in a graphical way, or by inserting some
+conflict markers. Mercurial does not include any interactive merge
+programs but relies on external tools for that.
+
+Available merge tools
+=====================
+
+External merge tools and their properties are configured in the
+merge-tools configuration section - see hgrc(5) - but they can often just
+be named by their executable.
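+
+For example, a configuration for the ``kdiff3`` tool could look like
+this (the path and arguments are illustrative; see hgrc(5) for the
+full set of supported keys)::
+
+  [merge-tools]
+  kdiff3.executable = ~/bin/kdiff3
+  kdiff3.args = $base $local $other -o $output
+  kdiff3.priority = 1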
+
+A merge tool is generally usable if its executable can be found on the
+system and if it can handle the merge. The executable is found if it
+is an absolute or relative executable path or the name of an
+application in the executable search path. The tool is assumed to be
+able to handle the merge if it can handle symlinks when the file is a
+symlink, if it can handle binary files when the file is binary, and if
+a GUI is available when the tool requires a GUI.
+
+There are some internal merge tools which can be used. The internal
+merge tools are:
+
+.. internaltoolsmarker
+
+Internal tools are always available and do not require a GUI but will
+by default not handle symlinks or binary files. See the next section
+for details about the "actual capabilities" described above.
+
+Choosing a merge tool
+=====================
+
+Mercurial uses these rules when deciding which merge tool to use:
+
+1. If a tool has been specified with the --tool option to merge or resolve, it
+   is used.  If it is the name of a tool in the merge-tools configuration, its
+   configuration is used. Otherwise the specified tool must be executable by
+   the shell.
+
+2. If the ``HGMERGE`` environment variable is present, its value is used and
+   must be executable by the shell.
+
+3. If the filename of the file to be merged matches any of the patterns in the
+   merge-patterns configuration section, the first usable merge tool
+   corresponding to a matching pattern is used.
+
+4. If ui.merge is set it will be considered next. If the value is not the name
+   of a configured tool, the specified value is used and must be executable by
+   the shell. Otherwise the named tool is used if it is usable.
+
+5. If any usable merge tools are present in the merge-tools configuration
+   section, the one with the highest priority is used.
+
+6. If a program named ``hgmerge`` can be found on the system, it is used - but
+   it will by default not be used for symlinks and binary files.
+
+7. If the file to be merged is not binary and is not a symlink, then
+   the internal ``:merge`` tool is used.
+
+8. Otherwise, ``:prompt`` is used.
+
+For historical reasons, Mercurial treats merge tools as shown below
+while examining the rules above.
+
+==== =============== ====== =======
+step specified via   binary symlink
+==== =============== ====== =======
+1.   --tool          o/o    o/o
+2.   HGMERGE         o/o    o/o
+3.   merge-patterns  o/o(*) x/?(*)
+4.   ui.merge        x/?(*) x/?(*)
+==== =============== ====== =======
+
+Each capability column indicates Mercurial behavior for
+internal/external merge tools at examining each rule.
+
+- "o": "assume that a tool has capability"
+- "x": "assume that a tool does not have capability"
+- "?": "check actual capability of a tool"
+
+If ``merge.strict-capability-check`` configuration is true, Mercurial
+checks capabilities of merge tools strictly in (*) cases above (= each
+capability column becomes "?/?"). It is false by default for backward
+compatibility.
+
+.. note::
+
+   After selecting a merge program, Mercurial will by default attempt
+   to merge the files using a simple merge algorithm first. Only if it doesn't
+   succeed because of conflicting changes will Mercurial actually execute the
+   merge program. Whether to use the simple merge algorithm first can be
+   controlled by the premerge setting of the merge tool. Premerge is enabled by
+   default unless the file is binary or a symlink.
+
+See the merge-tools and ui sections of hgrc(5) for details on the
+configuration of merge tools.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/pager.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,43 @@
+Some Mercurial commands can produce a lot of output, and Mercurial will
+attempt to use a pager to make those commands more pleasant.
+
+To set the pager that should be used, set the ``pager.pager`` configuration option::
+
+  [pager]
+  pager = less -FRX
+
+If no pager is set in the user or repository configuration, Mercurial uses the
+environment variable $PAGER. If $PAGER is not set, pager.pager from the default
+or system configuration is used. If none of these are set, a default pager will
+be used, typically `less` on Unix and `more` on Windows.
+
+.. container:: windows
+
+  On Windows, `more` is not color aware, so using it effectively disables color.
+  MSYS and Cygwin shells provide `less` as a pager, which can be configured to
+  support ANSI color codes.  See :hg:`help config.color.pagermode` to configure
+  the color mode when invoking a pager.
+
+You can disable the pager for certain commands by adding them to the
+pager.ignore list::
+
+  [pager]
+  ignore = version, help, update
+
+To ignore global commands like :hg:`version` or :hg:`help`, you have
+to specify them in your user configuration file.
+
+To control whether the pager is used at all for an individual command,
+you can use --pager=<value>:
+
+  - use as needed: `auto`.
+  - require the pager: `yes` or `on`.
+  - suppress the pager: `no` or `off` (any unrecognized value
+    will also work).
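+
+For example, to suppress the pager for a single command::
+
+  hg log --pager=no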
+
+To globally turn off all attempts to use a pager, set::
+
+  [ui]
+  paginate = never
+
+which will prevent the pager from running.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/patterns.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,86 @@
+Mercurial accepts several notations for identifying one or more files
+at a time.
+
+By default, Mercurial treats filenames as shell-style extended glob
+patterns.
+
+Alternate pattern notations must be specified explicitly.
+
+.. note::
+
+  Patterns specified in ``.hgignore`` are not rooted.
+  Please see :hg:`help hgignore` for details.
+
+To use a plain path name without any pattern matching, start it with
+``path:``. These path names must completely match starting at the
+current repository root, and when the path points to a directory, it is matched
+recursively. To match all files in a directory non-recursively (not including
+any files in subdirectories), ``rootfilesin:`` can be used, specifying a
+path rooted at the repository root.
+
+To use an extended glob, start a name with ``glob:``. Globs are rooted
+at the current directory; a glob such as ``*.c`` will only match files
+in the current directory ending with ``.c``. ``rootglob:`` can be used
+instead of ``glob:`` for a glob that is rooted at the root of the
+repository.
+
+The supported glob syntax extensions are ``**`` to match any string
+across path separators and ``{a,b}`` to mean "a or b".
+
+To use a Perl/Python regular expression, start a name with ``re:``.
+Regexp pattern matching is anchored at the root of the repository.
+
+To read name patterns from a file, use ``listfile:`` or ``listfile0:``.
+The latter expects null delimited patterns while the former expects line
+feeds. Each string read from the file is itself treated as a file
+pattern.
+
+To read a set of patterns from a file, use ``include:`` or ``subinclude:``.
+``include:`` will use all the patterns from the given file and treat them as if
+they had been passed in manually.  ``subinclude:`` will only apply the patterns
+against files that are under the subinclude file's directory. See :hg:`help
+hgignore` for details on the format of these files.
+
+All patterns, except ``glob:`` patterns specified on the command line
+(this does not apply to the ``-I`` or ``-X`` options), can also match
+against directories: files under matched directories are treated as
+matched. For the ``-I`` and ``-X`` options, ``glob:`` will match
+directories recursively.
+
+Plain examples::
+
+  path:foo/bar        a name bar in a directory named foo in the root
+                      of the repository
+  path:path:name      a file or directory named "path:name"
+  rootfilesin:foo/bar the files in a directory called foo/bar, but not any files
+                      in its subdirectories and not a file bar in directory foo
+
+Glob examples::
+
+  glob:*.c       any name ending in ".c" in the current directory
+  *.c            any name ending in ".c" in the current directory
+  **.c           any name ending in ".c" in any subdirectory of the
+                 current directory including itself.
+  foo/*          any file in directory foo
+  foo/**         any file in directory foo plus all its subdirectories,
+                 recursively
+  foo/*.c        any name ending in ".c" in the directory foo
+  foo/**.c       any name ending in ".c" in any subdirectory of foo
+                 including itself.
+  rootglob:*.c   any name ending in ".c" in the root of the repository
+
+Regexp examples::
+
+  re:.*\.c$      any name ending in ".c", anywhere in the repository
+
+File examples::
+
+  listfile:list.txt  read list from list.txt with one file pattern per line
+  listfile0:list.txt read list from list.txt with null byte delimiters
+
+See also :hg:`help filesets`.
+
+Include examples::
+
+  include:path/to/mypatternfile    reads patterns to be applied to all paths
+  subinclude:path/to/subignorefile reads patterns specifically for paths in the
+                                   subdirectory
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/phases.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,100 @@
+What are phases?
+================
+
+Phases are a system for tracking which changesets have been or should
+be shared. This helps prevent common mistakes when modifying history
+(for instance, with the mq or rebase extensions).
+
+Each changeset in a repository is in one of the following phases:
+
+ - public : changeset is visible on a public server
+ - draft : changeset is not yet published
+ - secret : changeset should not be pushed, pulled, or cloned
+
+These phases are ordered (public < draft < secret) and no changeset
+can be in a lower phase than its ancestors. For instance, if a
+changeset is public, all its ancestors are also public. Lastly,
+changeset phases should only be changed towards the public phase.
+
+How are phases managed?
+=======================
+
+For the most part, phases should work transparently. By default, a
+changeset is created in the draft phase and is moved into the public
+phase when it is pushed to another repository.
+
+Once changesets become public, extensions like mq and rebase will
+refuse to operate on them to prevent creating duplicate changesets.
+Phases can also be manually manipulated with the :hg:`phase` command
+if needed. See :hg:`help -v phase` for examples.
+
+To make your commits secret by default, put this in your
+configuration file::
+
+  [phases]
+  new-commit = secret
+
+Phases and servers
+==================
+
+Normally, all servers are ``publishing`` by default. This means::
+
+ - all draft changesets that are pulled or cloned appear in phase
+   public on the client
+
+ - all draft changesets that are pushed appear as public on both
+   client and server
+
+ - secret changesets are neither pushed, pulled, nor cloned
+
+.. note::
+
+  Pulling a draft changeset from a publishing server does not mark it
+  as public on the server side due to the read-only nature of pull.
+
+Sometimes it may be desirable to push and pull changesets in the draft
+phase to share unfinished work. This can be done by setting a
+repository to disable publishing in its configuration file::
+
+  [phases]
+  publish = False
+
+See :hg:`help config` for more information on configuration files.
+
+.. note::
+
+  Servers running older versions of Mercurial are treated as
+  publishing.
+
+.. note::
+
+   Changesets in secret phase are not exchanged with the server. This
+   applies to their content: file names, file contents, and changeset
+   metadata. For technical reasons, the identifier (e.g. d825e4025e39)
+   of the secret changeset may be communicated to the server.
+
+
+Examples
+========
+
+ - list changesets in draft or secret phase::
+
+     hg log -r "not public()"
+
+ - change all secret changesets to draft::
+
+     hg phase --draft "secret()"
+
+ - forcibly move the current changeset and descendants from public to draft::
+
+     hg phase --force --draft .
+
+ - show a list of changeset revisions and each corresponding phase::
+
+     hg log --template "{rev} {phase}\n"
+
+ - resynchronize draft changesets relative to a remote repository::
+
+     hg phase -fd "outgoing(URL)"
+
+See :hg:`help phase` for more information on manually manipulating phases.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/revisions.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,223 @@
+Mercurial supports several ways to specify revisions.
+
+Specifying single revisions
+===========================
+
+A plain integer is treated as a revision number. Negative integers are
+treated as sequential offsets from the tip, with -1 denoting the tip,
+-2 denoting the revision prior to the tip, and so forth.
+
+A 40-digit hexadecimal string is treated as a unique revision identifier.
+A hexadecimal string less than 40 characters long is treated as a
+unique revision identifier and is referred to as a short-form
+identifier. A short-form identifier is only valid if it is the prefix
+of exactly one full-length identifier.
+
+Any other string is treated as a bookmark, tag, or branch name. A
+bookmark is a movable pointer to a revision. A tag is a permanent name
+associated with a revision. A branch name denotes the tipmost open branch head
+of that branch - or if they are all closed, the tipmost closed head of the
+branch. Bookmark, tag, and branch names must not contain the ":" character.
+
+The reserved name "tip" always identifies the most recent revision.
+
+The reserved name "null" indicates the null revision. This is the
+revision of an empty repository, and the parent of revision 0.
+
+The reserved name "." indicates the working directory parent. If no
+working directory is checked out, it is equivalent to null. If an
+uncommitted merge is in progress, "." is the revision of the first
+parent.
+
+Finally, commands that expect a single revision (like ``hg update``) also
+accept revsets (see below for details). When given a revset, they use the
+last revision of the revset. A few commands accept two single revisions
+(like ``hg diff``). When given a revset, they use the first and the last
+revisions of the revset.
+
+Specifying multiple revisions
+=============================
+
+Mercurial supports a functional language for selecting a set of
+revisions. Expressions in this language are called revsets.
+
+The language supports a number of predicates which are joined by infix
+operators. Parentheses can be used for grouping.
+
+Identifiers such as branch names may need quoting with single or
+double quotes if they contain characters like ``-`` or if they match
+one of the predefined predicates.
+
+Special characters can be used in quoted identifiers by escaping them,
+e.g., ``\n`` is interpreted as a newline. To prevent them from being
+interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``.
+
+Operators
+=========
+
+There is a single prefix operator:
+
+``not x``
+  Changesets not in x. Short form is ``! x``.
+
+These are the supported infix operators:
+
+``x::y``
+  A DAG range, meaning all changesets that are descendants of x and
+  ancestors of y, including x and y themselves. If the first endpoint
+  is left out, this is equivalent to ``ancestors(y)``; if the second
+  is left out, it is equivalent to ``descendants(x)``.
+
+  An alternative syntax is ``x..y``.
+
+``x:y``
+  All changesets with revision numbers between x and y, both
+  inclusive. Either endpoint can be left out; they default to 0 and
+  tip, respectively.
+
+``x and y``
+  The intersection of changesets in x and y. Short form is ``x & y``.
+
+``x or y``
+  The union of changesets in x and y. There are two alternative short
+  forms: ``x | y`` and ``x + y``.
+
+``x - y``
+  Changesets in x but not in y.
+
+``x % y``
+  Changesets that are ancestors of x but not ancestors of y (i.e. ::x - ::y).
+  This is shorthand notation for ``only(x, y)`` (see below). The second
+  argument is optional and, if left out, is equivalent to ``only(x)``.
+
+``x^n``
+  The nth parent of x, n == 0, 1, or 2.
+  For n == 0, x; for n == 1, the first parent of each changeset in x;
+  for n == 2, the second parent of each changeset in x.
+
+``x~n``
+  The nth first ancestor of x; ``x~0`` is x; ``x~3`` is ``x^^^``.
+  For n < 0, the nth unambiguous descendant of x.
+
+``x ## y``
+  Concatenate strings and identifiers into one string.
+
+  All other prefix, infix and postfix operators have lower priority than
+  ``##``. For example, ``a1 ## a2~2`` is equivalent to ``(a1 ## a2)~2``.
+
+  For example::
+
+    [revsetalias]
+    issue(a1) = grep(r'\bissue[ :]?' ## a1 ## r'\b|\bbug\(' ## a1 ## r'\)')
+
+  ``issue(1234)`` is equivalent to
+  ``grep(r'\bissue[ :]?1234\b|\bbug\(1234\)')``
+  in this case. This matches against all of "issue 1234", "issue:1234",
+  "issue1234" and "bug(1234)".
+
+There is a single postfix operator:
+
+``x^``
+  Equivalent to ``x^1``, the first parent of each changeset in x.
+
+Patterns
+========
+
+Where noted, predicates that perform string matching can accept a pattern
+string. The pattern may be either a literal, or a regular expression. If the
+pattern starts with ``re:``, the remainder of the pattern is treated as a
+regular expression. Otherwise, it is treated as a literal. To match a pattern
+that actually starts with ``re:``, use the prefix ``literal:``.
+
+Matching is case-sensitive, unless otherwise noted. To perform a
+case-insensitive match on a case-sensitive predicate, use a regular
+expression, prefixed with ``(?i)``.
+
+For example, ``tag(r're:(?i)release')`` matches "release" or "RELEASE"
+or "Release", etc.
+
+Predicates
+==========
+
+The following predicates are supported:
+
+.. predicatesmarker
+
+Aliases
+=======
+
+New predicates (known as "aliases") can be defined, using any combination of
+existing predicates or other aliases. An alias definition looks like::
+
+  <alias> = <definition>
+
+in the ``revsetalias`` section of a Mercurial configuration file. Arguments
+of the form `a1`, `a2`, etc. are substituted from the alias into the
+definition.
+
+For example,
+
+::
+
+  [revsetalias]
+  h = heads()
+  d(s) = sort(s, date)
+  rs(s, k) = reverse(sort(s, k))
+
+defines three aliases, ``h``, ``d``, and ``rs``. ``rs(0:tip, author)`` is
+exactly equivalent to ``reverse(sort(0:tip, author))``.
+
+Equivalents
+===========
+
+Command line equivalents for :hg:`log`::
+
+  -f    ->  ::.
+  -d x  ->  date(x)
+  -k x  ->  keyword(x)
+  -m    ->  merge()
+  -u x  ->  user(x)
+  -b x  ->  branch(x)
+  -P x  ->  !::x
+  -l x  ->  limit(expr, x)
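+
+For instance, the following two commands should select the same
+revisions, since ``-f`` is simply the revset ``::.`` (a sketch)::
+
+  $ hg log -f
+  $ hg log -r "::."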
+
+Examples
+========
+
+Some sample queries:
+
+- Changesets on the default branch::
+
+    hg log -r "branch(default)"
+
+- Changesets on the default branch since tag 1.5 (excluding merges)::
+
+    hg log -r "branch(default) and 1.5:: and not merge()"
+
+- Open branch heads::
+
+    hg log -r "head() and not closed()"
+
+- Changesets between tags 1.3 and 1.5 mentioning "bug" that affect
+  ``hgext/*``::
+
+    hg log -r "1.3::1.5 and keyword(bug) and file('hgext/*')"
+
+- Changesets committed in May 2008, sorted by user::
+
+    hg log -r "sort(date('May 2008'), user)"
+
+- Changesets mentioning "bug" or "issue" that are not in a tagged
+  release::
+
+    hg log -r "(keyword(bug) or keyword(issue)) and not ancestors(tag())"
+
+- Update to the commit that bookmark @ is pointing to, without activating the
+  bookmark (this works because the last revision of the revset is used)::
+
+    hg update :@
+
+- Show diff between tags 1.3 and 1.5 (this works because the first and the
+  last revisions of the revset are used)::
+
+    hg diff -r 1.3::1.5
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/scripting.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,210 @@
+It is common for machines (as opposed to humans) to consume Mercurial.
+This help topic describes some of the considerations for interfacing
+machines with Mercurial.
+
+Choosing an Interface
+=====================
+
+Machines have a choice of several methods to interface with Mercurial.
+These include:
+
+- Executing the ``hg`` process
+- Querying an HTTP server
+- Calling out to a command server
+
+Executing ``hg`` processes is very similar to how humans interact with
+Mercurial in the shell. It should already be familiar to you.
+
+:hg:`serve` can be used to start a server. By default, this will start
+a "hgweb" HTTP server. This HTTP server has support for machine-readable
+output, such as JSON. For more, see :hg:`help hgweb`.
+
+:hg:`serve` can also start a "command server." Clients can connect
+to this server and issue Mercurial commands over a special protocol.
+For more details on the command server, including links to client
+libraries, see https://www.mercurial-scm.org/wiki/CommandServer.
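+
+Such a server can be started by hand, for example over stdio (a
+sketch; client libraries normally manage this for you)::
+
+  $ hg serve --cmdserver pipe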
+
+:hg:`serve` based interfaces (the hgweb and command servers) have the
+advantage over simple ``hg`` process invocations in that they are
+likely more efficient. This is because there is significant overhead
+in spawning new Python processes.
+
+.. tip::
+
+   If you need to invoke several ``hg`` processes in short order and/or
+   performance is important to you, use of a server-based interface
+   is highly recommended.
+
+Environment Variables
+=====================
+
+As documented in :hg:`help environment`, various environment variables
+influence the operation of Mercurial. The following are particularly
+relevant for machines consuming Mercurial:
+
+HGPLAIN
+    If not set, Mercurial's output could be influenced by configuration
+    settings that impact its encoding, verbose mode, localization, etc.
+
+    It is highly recommended for machines to set this variable when
+    invoking ``hg`` processes.
+
+HGENCODING
+    If not set, the locale used by Mercurial will be detected from the
+    environment. If the determined locale does not support display of
+    certain characters, Mercurial may render these character sequences
+    incorrectly (often by using "?" as a placeholder for invalid
+    characters in the current locale).
+
+    Explicitly setting this environment variable is a good practice to
+    guarantee consistent results. "utf-8" is a good choice on UNIX-like
+    environments.
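+
+    For example, a script that wants stable, UTF-8 output might invoke
+    Mercurial like this (a sketch)::
+
+      $ HGPLAIN=1 HGENCODING=utf-8 hg log -T '{node}\n'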
+
+HGRCPATH
+    If not set, Mercurial will inherit config options from config files
+    using the process described in :hg:`help config`. This includes
+    inheriting user or system-wide config files.
+
+    When utmost control over the Mercurial configuration is desired, the
+    value of ``HGRCPATH`` can be set to an explicit file with known good
+    configs. In rare cases, the value can be set to an empty file or the
+    null device (often ``/dev/null``) to bypass loading of any user or
+    system config files. Note that these approaches can have unintended
+    consequences, as the user and system config files often define things
+    like the username and extensions that may be required to interface
+    with a repository.
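+
+    For instance, to run a command with no user or system
+    configuration at all (a sketch; note the caveats above)::
+
+      $ HGRCPATH=/dev/null hg log -T '{node}\n'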
+
+HGRCSKIPREPO
+    When set, the ``.hg/hgrc`` files from repositories are not read.
+
+    Note that not reading the repository's configuration can have
+    unintended consequences, as the repository config files can define
+    things like extensions that are required for access to the
+    repository.
+
+Command-line Flags
+==================
+
+Mercurial's default command-line parser is designed for humans, and is not
+robust against malicious input. For instance, you can start a debugger by
+passing ``--debugger`` as an option value::
+
+    $ REV=--debugger sh -c 'hg log -r "$REV"'
+
+This happens because several command-line flags need to be scanned without
+using a concrete command table, which may be modified while loading repository
+settings and extensions.
+
+Since Mercurial 4.4.2, the parsing of such flags may be restricted by setting
+``HGPLAIN=+strictflags``. When this feature is enabled, all early options
+(e.g. ``-R/--repository``, ``--cwd``, ``--config``) must be specified first
+amongst the other global options, and cannot be injected at an arbitrary
+location::
+
+    $ HGPLAIN=+strictflags hg -R "$REPO" log -r "$REV"
+
+In earlier Mercurial versions where ``+strictflags`` isn't available, you
+can mitigate the issue by concatenating an option value with its flag::
+
+    $ hg log -r"$REV" --keyword="$KEYWORD"
+
+Consuming Command Output
+========================
+
+It is common for machines to need to parse the output of Mercurial
+commands for relevant data. This section describes the various
+techniques for doing so.
+
+Parsing Raw Command Output
+--------------------------
+
+Likely the simplest and most effective solution for consuming command
+output is to simply invoke ``hg`` commands as a user would and
+parse their output.
+
+The output of many commands can easily be parsed with tools like
+``grep``, ``sed``, and ``awk``.
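+
+For example, one way to extract just the changeset hashes from the
+default ``hg log`` output might be (a sketch; a template, shown later,
+is usually more robust)::
+
+  $ hg log | grep '^changeset:' | awk -F: '{print $3}'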
+
+A potential downside with parsing command output is that the output
+of commands can change when Mercurial is upgraded. While Mercurial
+does generally strive for strong backwards compatibility, command
+output does occasionally change. Having tests for your automated
+interactions with ``hg`` commands is generally recommended, but is
+even more important when raw command output parsing is involved.
+
+Using Templates to Control Output
+---------------------------------
+
+Many ``hg`` commands support templatized output via the
+``-T/--template`` argument. For more, see :hg:`help templates`.
+
+Templates are useful for explicitly controlling output so that
+you get exactly the data you want formatted how you want it. For
+example, ``log -T {node}\n`` can be used to print a newline
+delimited list of changeset nodes instead of a human-tailored
+output containing authors, dates, descriptions, etc.
+
+.. tip::
+
+   If parsing raw command output is too complicated, consider
+   using templates to make your life easier.
+
+The ``-T/--template`` argument allows specifying pre-defined styles.
+Mercurial ships with the machine-readable styles ``cbor``, ``json``,
+and ``xml``, which provide CBOR, JSON, and XML output, respectively.
+These are useful for producing output that is machine readable as-is.
+
+(Mercurial 5.0 is required for CBOR style.)
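+
+For example, to get JSON output for the working directory parent (a
+sketch; the exact set of keys may vary between versions)::
+
+  $ hg log -r . -Tjson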
+
+.. important::
+
+   The ``json`` and ``xml`` styles are considered experimental. While
+   they may be attractive to use for easily obtaining machine-readable
+   output, their behavior may change in subsequent versions.
+
+   These styles may also exhibit unexpected results when dealing with
+   certain encodings. Mercurial treats things like filenames as a
+   series of bytes and normalizing certain byte sequences to JSON
+   or XML with certain encoding settings can lead to surprises.
+
+Command Server Output
+---------------------
+
+If using the command server to interact with Mercurial, you are likely
+using an existing library/API that abstracts implementation details of
+the command server. If so, this interface layer may perform parsing for
+you, saving you the work of implementing it yourself.
+
+Output Verbosity
+----------------
+
+Commands often have varying output verbosity, even when machine
+readable styles are being used (e.g. ``-T json``). Adding
+``-v/--verbose`` and ``--debug`` to the command's arguments can
+increase the amount of data exposed by Mercurial.
+
+An alternate way to get the data you need is by explicitly specifying
+a template.
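+
+For example, the following sketch shows the same query with
+increasing verbosity::
+
+  $ hg log -r . -Tjson
+  $ hg log -r . -Tjson --verbose
+  $ hg log -r . -Tjson --debug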
+
+Other Topics
+============
+
+revsets
+   Revision sets ("revsets") are a functional query language for
+   selecting a set of revisions. Think of them as SQL for Mercurial
+   repositories. Revsets are useful for querying repositories for
+   specific data.
+
+   See :hg:`help revsets` for more.
+
+share extension
+   The ``share`` extension provides functionality for sharing
+   repository data across several working copies. It can even
+   automatically "pool" storage for logically related repositories when
+   cloning.
+
+   Configuring the ``share`` extension can significantly reduce
+   resource utilization, particularly disk space and network
+   bandwidth. This is especially true for continuous integration (CI)
+   environments.
+
+   See :hg:`help -e share` for more.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/subrepos.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,171 @@
+Subrepositories let you nest external repositories or projects into a
+parent Mercurial repository, and make commands operate on them as a
+group.
+
+Mercurial currently supports Mercurial, Git, and Subversion
+subrepositories.
+
+Subrepositories are made of three components:
+
+1. Nested repository checkouts. They can appear anywhere in the
+   parent working directory.
+
+2. Nested repository references. They are defined in ``.hgsub``, which
+   should be placed in the root of the working directory, and
+   tell where the subrepository checkouts come from. Mercurial
+   subrepositories are referenced like::
+
+     path/to/nested = https://example.com/nested/repo/path
+
+   Git and Subversion subrepos are also supported::
+
+     path/to/nested = [git]git://example.com/nested/repo/path
+     path/to/nested = [svn]https://example.com/nested/trunk/path
+
+   where ``path/to/nested`` is the checkout location relative to the
+   parent Mercurial root, and ``https://example.com/nested/repo/path``
+   is the source repository path. The source can also reference a
+   filesystem path.
+
+   Note that ``.hgsub`` does not exist by default in Mercurial
+   repositories; you have to create it and add it to the parent
+   repository before using subrepositories.
+
+3. Nested repository states. They are defined in ``.hgsubstate``, which
+   is placed in the root of the working directory, and
+   capture whatever information is required to restore the
+   subrepositories to the state in which they were committed in a
+   parent repository changeset. Mercurial automatically records the
+   nested repositories' states when committing in the parent repository.
+
+   .. note::
+
+      The ``.hgsubstate`` file should not be edited manually.
+
+
+Adding a Subrepository
+======================
+
+If ``.hgsub`` does not exist, create it and add it to the parent
+repository. Clone or check out the external projects where you want
+them to live in the parent repository. Edit ``.hgsub`` and add the
+subrepository entry as described above. At this point, the
+subrepository is tracked and the next commit will record its state in
+``.hgsubstate`` and bind it to the committed changeset.
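+
+A minimal end-to-end sketch (the nested path and URL are
+illustrative)::
+
+  $ hg clone https://example.com/nested/repo/path nested
+  $ echo 'nested = https://example.com/nested/repo/path' >> .hgsub
+  $ hg add .hgsub
+  $ hg commit -m "add nested subrepository"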
+
+Synchronizing a Subrepository
+=============================
+
+Subrepos do not automatically track the latest changeset of their
+sources. Instead, they are updated to the changeset that corresponds
+with the changeset checked out in the top-level repository. This is so
+developers always get a consistent set of compatible code and
+libraries when they update.
+
+Thus, updating subrepos is a manual process. Simply check out the
+target subrepo at the desired revision, test in the top-level repo, then
+commit in the parent repository to record the new combination.
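+
+For example (a sketch; the subrepo path and revision are
+illustrative)::
+
+  $ (cd nested && hg pull && hg update 1.5)
+  $ hg commit -m "update nested subrepo to 1.5"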
+
+Deleting a Subrepository
+========================
+
+To remove a subrepository from the parent repository, delete its
+reference from ``.hgsub``, then remove its files.
+
+Interaction with Mercurial Commands
+===================================
+
+:add: add does not recurse into subrepos unless -S/--subrepos is
+    specified.  However, if you specify the full path of a file in a
+    subrepo, it will be added even without -S/--subrepos specified.
+    Subversion subrepositories are currently silently
+    ignored.
+
+:addremove: addremove does not recurse into subrepos unless
+    -S/--subrepos is specified.  However, if you specify the full
+    path of a directory in a subrepo, addremove will be performed on
+    it even without -S/--subrepos being specified.  Git and
+    Subversion subrepositories will print a warning and continue.
+
+:archive: archive does not recurse into subrepositories unless
+    -S/--subrepos is specified.
+
+:cat: Git subrepositories only support exact file matches.
+    Subversion subrepositories are currently ignored.
+
+:commit: commit creates a consistent snapshot of the state of the
+    entire project and its subrepositories. If any subrepositories
+    have been modified, Mercurial will abort.  Mercurial can be made
+    to instead commit all modified subrepositories by specifying
+    -S/--subrepos, or setting "ui.commitsubrepos=True" in a
+    configuration file (see :hg:`help config`).  After there are no
+    longer any modified subrepositories, it records their state and
+    finally commits it in the parent repository.  The --addremove
+    option also honors the -S/--subrepos option.  However, Git and
+    Subversion subrepositories will print a warning and abort.
+
+:diff: diff does not recurse into subrepos unless -S/--subrepos is
+    specified.  However, if you specify the full path of a file or
+    directory in a subrepo, it will be diffed even without
+    -S/--subrepos being specified.  Subversion subrepositories are
+    currently silently ignored.
+
+:files: files does not recurse into subrepos unless -S/--subrepos is
+    specified.  However, if you specify the full path of a file or
+    directory in a subrepo, it will be displayed even without
+    -S/--subrepos being specified.  Git and Subversion subrepositories
+    are currently silently ignored.
+
+:forget: forget currently only handles exact file matches in subrepos.
+    Git and Subversion subrepositories are currently silently ignored.
+
+:incoming: incoming does not recurse into subrepos unless -S/--subrepos
+    is specified. Git and Subversion subrepositories are currently
+    silently ignored.
+
+:outgoing: outgoing does not recurse into subrepos unless -S/--subrepos
+    is specified. Git and Subversion subrepositories are currently
+    silently ignored.
+
+:pull: pull is not recursive since it is not clear what to pull prior
+    to running :hg:`update`. Listing and retrieving all
+    subrepository changes referenced by the parent repository's pulled
+    changesets is expensive at best, and impossible in the Subversion
+    case.
+
+:push: Mercurial will automatically push all subrepositories first
+    when the parent repository is being pushed. This ensures new
+    subrepository changes are available when referenced by top-level
+    repositories.  Push is a no-op for Subversion subrepositories.
+
+:serve: serve does not recurse into subrepositories unless
+    -S/--subrepos is specified.  Git and Subversion subrepositories
+    are currently silently ignored.
+
+:status: status does not recurse into subrepositories unless
+    -S/--subrepos is specified. Subrepository changes are displayed as
+    regular Mercurial changes on the subrepository
+    elements. Subversion subrepositories are currently silently
+    ignored.
+
+:remove: remove does not recurse into subrepositories unless
+    -S/--subrepos is specified.  However, if you specify a file or
+    directory path in a subrepo, it will be removed even without
+    -S/--subrepos.  Git and Subversion subrepositories are currently
+    silently ignored.
+
+:update: update restores the subrepos to the state in which they were
+    originally committed in the target changeset. If the recorded
+    changeset is not available in the current subrepository, Mercurial
+    will pull it in before updating.  This means that updating
+    can require network access when using subrepositories.
+
+Remapping Subrepository Sources
+===============================
+
+A subrepository source location may change during a project's life,
+invalidating references stored in the parent repository history. To
+fix this, rewriting rules can be defined in the parent repository's
+``hgrc`` file or in the Mercurial configuration. See the ``[subpaths]``
+section in hgrc(5) for more details.
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/templates.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,215 @@
+Mercurial allows you to customize the output of commands through
+templates. You can either pass in a template or select an existing
+template-style from the command line, via the --template option.
+
+You can customize output for any "log-like" command: log,
+outgoing, incoming, tip, parents, and heads.
+
+Some built-in styles are packaged with Mercurial. These can be listed
+with :hg:`log --template list`. Example usage::
+
+    $ hg log -r1.0::1.1 --template changelog
+
+A template is a piece of text, with markup to invoke variable
+expansion::
+
+    $ hg log -r1 --template "{node}\n"
+    b56ce7b07c52de7d5fd79fb89701ea538af65746
+
+Keywords
+========
+
+Strings in curly braces are called keywords. The availability of
+keywords depends on the exact context of the templater. These
+keywords are usually available for templating a log-like command:
+
+.. keywordsmarker
+
+The "date" keyword does not produce human-readable output. If you
+want to use a date in your output, you can use a filter to process
+it. Filters are functions which return a string based on the input
+variable. Be sure to use the stringify filter first when you're
+applying a string-input filter to a list-like input variable.
+You can also use a chain of filters to get the desired output::
+
+   $ hg tip --template "{date|isodate}\n"
+   2008-08-21 18:22 +0000
+
+Filters
+=======
+
+List of filters:
+
+.. filtersmarker
+
+Note that a filter is nothing more than a function call, i.e.
+``expr|filter`` is equivalent to ``filter(expr)``.
+
+Functions
+=========
+
+In addition to filters, there are some basic built-in functions:
+
+.. functionsmarker
+
+Operators
+=========
+
+We provide a limited set of infix arithmetic operations on integers::
+
+  + for addition
+  - for subtraction
+  * for multiplication
+  / for floor division (division rounded down toward negative infinity)
+
+Division fulfills the law x = (x / y) * y + mod(x, y).
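+
+For example (a sketch, assuming the repository has a revision 7;
+``mod`` is the built-in modulo function)::
+
+  $ hg log -r 7 --template "{rev / 2} {mod(rev, 2)}\n"
+  3 1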
+
+Also, for any expression that returns a list, there is a list operator::
+
+    expr % "{template}"
+
+As seen in the above example, ``{template}`` is interpreted as a template.
+To prevent it from being interpreted, you can use an escape character ``\{``
+or a raw string prefix, ``r'...'``.
+
+The dot operator can be used as a shorthand for accessing a sub item:
+
+- ``expr.member`` is roughly equivalent to ``expr % '{member}'`` if ``expr``
+  returns a non-list/dict. The returned value is not stringified.
+- ``dict.key`` is identical to ``get(dict, 'key')``.
+
+Aliases
+=======
+
+New keywords and functions can be defined in the ``templatealias`` section of
+a Mercurial configuration file::
+
+  <alias> = <definition>
+
+Arguments of the form `a1`, `a2`, etc. are substituted from the alias into
+the definition.
+
+For example,
+
+::
+
+  [templatealias]
+  r = rev
+  rn = "{r}:{node|short}"
+  leftpad(s, w) = pad(s, w, ' ', True)
+
+defines two symbol aliases, ``r`` and ``rn``, and a function alias
+``leftpad()``.
+
+It's also possible to specify complete template strings, using the
+``templates`` section. The syntax used is the general template string syntax.
+
+For example,
+
+::
+
+  [templates]
+  nodedate = "{node|short}: {date(date, "%Y-%m-%d")}\n"
+
+defines a template, ``nodedate``, which can be called like::
+
+  $ hg log -r . -Tnodedate
+
+A template defined in ``templates`` section can also be referenced from
+another template::
+
+  $ hg log -r . -T "{rev} {nodedate}"
+
+but be aware that the keywords cannot be overridden by templates. For example,
+a template defined as ``templates.rev`` cannot be referenced as ``{rev}``.
+
+A template defined in ``templates`` section may have sub templates which
+are inserted before/after/between items::
+
+  [templates]
+  myjson = ' {dict(rev, node|short)|json}'
+  myjson:docheader = '\{\n'
+  myjson:docfooter = '\n}\n'
+  myjson:separator = ',\n'
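+
+A template with sub templates is invoked like any other named
+template, for example (a sketch)::
+
+  $ hg log -r 0:1 -Tmyjson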
+
+Examples
+========
+
+Some sample command line templates:
+
+- Format lists, e.g. files::
+
+   $ hg log -r 0 --template "files:\n{files % '  {file}\n'}"
+
+- Join the list of files with a ", "::
+
+   $ hg log -r 0 --template "files: {join(files, ', ')}\n"
+
+- Join the list of files ending with ".py" with a ", "::
+
+   $ hg log -r 0 --template "pythonfiles: {join(files('**.py'), ', ')}\n"
+
+- Separate non-empty arguments by a " "::
+
+   $ hg log -r 0 --template "{separate(' ', node, bookmarks, tags}\n"
+
+- Modify each line of a commit description::
+
+   $ hg log --template "{splitlines(desc) % '**** {line}\n'}"
+
+- Format date::
+
+   $ hg log -r 0 --template "{date(date, '%Y')}\n"
+
+- Display date in UTC::
+
+   $ hg log -r 0 --template "{localdate(date, 'UTC')|date}\n"
+
+- Output the description wrapped to a fill-width of 30::
+
+   $ hg log -r 0 --template "{fill(desc, 30)}"
+
+- Use a conditional to test for the default branch::
+
+   $ hg log -r 0 --template "{ifeq(branch, 'default', 'on the main branch',
+   'on branch {branch}')}\n"
+
+- Append a newline if not empty::
+
+   $ hg tip --template "{if(author, '{author}\n')}"
+
+- Label the output for use with the color extension::
+
+   $ hg log -r 0 --template "{label('changeset.{phase}', node|short)}\n"
+
+- Invert the firstline filter, i.e. everything but the first line::
+
+   $ hg log -r 0 --template "{sub(r'^.*\n?\n?', '', desc)}\n"
+
+- Display the contents of the 'extra' field, one per line::
+
+   $ hg log -r 0 --template "{join(extras, '\n')}\n"
+
+- Mark the active bookmark with '*'::
+
+   $ hg log --template "{bookmarks % '{bookmark}{ifeq(bookmark, active, '*')} '}\n"
+
+- Find the previous release candidate tag, and the distance and changes since it::
+
+   $ hg log -r . --template "{latesttag('re:^.*-rc$') % '{tag}, {changes}, {distance}'}\n"
+
+- Mark the working copy parent with '@'::
+
+   $ hg log --template "{ifcontains(rev, revset('.'), '@')}\n"
+
+- Show details of parent revisions::
+
+   $ hg log --template "{revset('parents(%d)', rev) % '{desc|firstline}\n'}"
+
+- Show only commit descriptions that start with "template"::
+
+   $ hg log --template "{startswith('template', firstline(desc))}\n"
+
+- Print the first word of each line of a commit message::
+
+   $ hg log --template "{word(0, desc)}\n"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/urls.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,66 @@
+Valid URLs are of the form::
+
+  local/filesystem/path[#revision]
+  file://local/filesystem/path[#revision]
+  http://[user[:pass]@]host[:port]/[path][#revision]
+  https://[user[:pass]@]host[:port]/[path][#revision]
+  ssh://[user@]host[:port]/[path][#revision]
+
+Paths in the local filesystem can either point to Mercurial
+repositories or to bundle files (as created by :hg:`bundle` or
+:hg:`incoming --bundle`). See also :hg:`help paths`.
+
+An optional identifier after # indicates a particular branch, tag, or
+changeset to use from the remote repository. See also :hg:`help
+revisions`.
+
+Some features, such as pushing to http:// and https:// URLs, are only
+possible if the feature is explicitly enabled on the remote Mercurial
+server.
+
+Note that the security of HTTPS URLs depends on proper configuration of
+``web.cacerts``.
+
+Some notes about using SSH with Mercurial:
+
+- SSH requires an accessible shell account on the destination machine
+  and a copy of hg in the remote path or specified with remotecmd.
+- path is relative to the remote user's home directory by default. Use
+  an extra slash at the start of a path to specify an absolute path::
+
+    ssh://example.com//tmp/repository
+
+- Mercurial doesn't use its own compression via SSH; the right thing
+  to do is to configure it in your ~/.ssh/config, e.g.::
+
+    Host *.mylocalnetwork.example.com
+      Compression no
+    Host *
+      Compression yes
+
+  Alternatively specify "ssh -C" as your ssh command in your
+  configuration file or with the --ssh command line option.
+
+These URLs can all be stored in your configuration file with path
+aliases under the [paths] section like so::
+
+  [paths]
+  alias1 = URL1
+  alias2 = URL2
+  ...
+
+You can then use the alias for any command that uses a URL (for
+example :hg:`pull alias1` will be treated as :hg:`pull URL1`).
+
+Two path aliases are special because they are used as defaults when
+you do not provide the URL to a command:
+
+default:
+  When you create a repository with hg clone, the clone command saves
+  the location of the source repository as the new repository's
+  'default' path. This is then used when you omit a path from push- and
+  pull-like commands (including incoming and outgoing).
+
+default-push:
+  The push command will look for a path named 'default-push', and
+  prefer it over 'default' if both are defined.
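+
+To review the aliases that are in effect, including ``default`` and
+``default-push``, one can run (a sketch)::
+
+  $ hg paths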
--- a/mercurial/hg.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hg.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,7 +9,6 @@
 from __future__ import absolute_import
 
 import errno
-import hashlib
 import os
 import shutil
 import stat
@@ -48,7 +47,7 @@
     verify as verifymod,
     vfs as vfsmod,
 )
-
+from .utils import hashutil
 from .interfaces import repository as repositorymod
 
 release = lock.release
@@ -738,7 +737,7 @@
                 )
         elif sharenamemode == b'remote':
             sharepath = os.path.join(
-                sharepool, node.hex(hashlib.sha1(source).digest())
+                sharepool, node.hex(hashutil.sha1(source).digest())
             )
         else:
             raise error.Abort(
@@ -1345,7 +1344,7 @@
 
 
 def remoteui(src, opts):
-    b'build a remote ui from ui or repo and opts'
+    """build a remote ui from ui or repo and opts"""
     if util.safehasattr(src, b'baseui'):  # looks like a repository
         dst = src.baseui.copy()  # drop repo-specific config
         src = src.ui  # copy target options from repo
--- a/mercurial/hgweb/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hgweb/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -77,19 +77,19 @@
         else:
             prefix = b''
 
-        port = r':%d' % self.httpd.port
-        if port == r':80':
-            port = r''
+        port = ':%d' % self.httpd.port
+        if port == ':80':
+            port = ''
 
         bindaddr = self.httpd.addr
-        if bindaddr == r'0.0.0.0':
-            bindaddr = r'*'
-        elif r':' in bindaddr:  # IPv6
-            bindaddr = r'[%s]' % bindaddr
+        if bindaddr == '0.0.0.0':
+            bindaddr = '*'
+        elif ':' in bindaddr:  # IPv6
+            bindaddr = '[%s]' % bindaddr
 
         fqaddr = self.httpd.fqaddr
-        if r':' in fqaddr:
-            fqaddr = r'[%s]' % fqaddr
+        if ':' in fqaddr:
+            fqaddr = '[%s]' % fqaddr
 
         url = b'http://%s%s/%s' % (
             pycompat.sysbytes(fqaddr),
--- a/mercurial/hgweb/common.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hgweb/common.py	Tue Jan 21 13:14:51 2020 -0500
@@ -143,9 +143,7 @@
 
 def _statusmessage(code):
     responses = httpserver.basehttprequesthandler.responses
-    return pycompat.bytesurl(
-        responses.get(code, (r'Error', r'Unknown error'))[0]
-    )
+    return pycompat.bytesurl(responses.get(code, ('Error', 'Unknown error'))[0])
 
 
 def statusmessage(code, message=None):
--- a/mercurial/hgweb/hgwebdir_mod.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hgweb/hgwebdir_mod.py	Tue Jan 21 13:14:51 2020 -0500
@@ -32,6 +32,7 @@
     error,
     extensions,
     hg,
+    pathutil,
     profiling,
     pycompat,
     registrar,
@@ -436,7 +437,7 @@
             def _virtualdirs():
                 # Check the full virtual path, and each parent
                 yield virtual
-                for p in util.finddirs(virtual):
+                for p in pathutil.finddirs(virtual):
                     yield p
 
             for virtualrepo in _virtualdirs():
@@ -485,7 +486,7 @@
             )
             return res.sendresponse()
         finally:
-            tmpl = None
+            del tmpl
 
     def makeindex(self, req, res, tmpl, subdir=b""):
         self.refresh()
--- a/mercurial/hgweb/server.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hgweb/server.py	Tue Jan 21 13:14:51 2020 -0500
@@ -43,8 +43,8 @@
     Just like CGI environment, the path is unquoted, the query is
     not.
     """
-    if r'?' in uri:
-        path, query = uri.split(r'?', 1)
+    if '?' in uri:
+        path, query = uri.split('?', 1)
     else:
         path, query = uri, r''
     return urlreq.unquote(path), query
@@ -62,7 +62,7 @@
 
     def writelines(self, seq):
         for msg in seq:
-            self.handler.log_error(r"HG error:  %s", encoding.strfromlocal(msg))
+            self.handler.log_error("HG error:  %s", encoding.strfromlocal(msg))
 
 
 class _httprequesthandler(httpservermod.basehttprequesthandler):
@@ -97,18 +97,18 @@
     def log_message(self, format, *args):
         self._log_any(self.server.accesslog, format, *args)
 
-    def log_request(self, code=r'-', size=r'-'):
+    def log_request(self, code='-', size='-'):
         xheaders = []
         if util.safehasattr(self, b'headers'):
             xheaders = [
-                h for h in self.headers.items() if h[0].startswith(r'x-')
+                h for h in self.headers.items() if h[0].startswith('x-')
             ]
         self.log_message(
-            r'"%s" %s %s%s',
+            '"%s" %s %s%s',
             self.requestline,
             str(code),
             str(size),
-            r''.join([r' %s:%s' % h for h in sorted(xheaders)]),
+            ''.join([' %s:%s' % h for h in sorted(xheaders)]),
         )
 
     def do_write(self):
@@ -128,20 +128,20 @@
                 isinstance(e, (OSError, socket.error))
                 and e.errno == errno.ECONNRESET
             ):
-                tb = r"".join(traceback.format_exception(*sys.exc_info()))
+                tb = "".join(traceback.format_exception(*sys.exc_info()))
                 # We need a native-string newline to poke in the log
                 # message, because we won't get a newline when using an
                 # r-string. This is the easy way out.
                 newline = chr(10)
                 self.log_error(
                     r"Exception happened during processing "
-                    r"request '%s':%s%s",
+                    "request '%s':%s%s",
                     self.path,
                     newline,
                     tb,
                 )
 
-            self._start_response(r"500 Internal Server Error", [])
+            self._start_response("500 Internal Server Error", [])
             self._write(b"Internal Server Error")
             self._done()
 
@@ -160,72 +160,72 @@
             self.server.prefix + b'/'
         ):
             self._start_response(pycompat.strurl(common.statusmessage(404)), [])
-            if self.command == r'POST':
+            if self.command == 'POST':
                 # Paranoia: tell the client we're going to close the
                 # socket so they don't try and reuse a socket that
                 # might have a POST body waiting to confuse us. We do
                 # this by directly munging self.saved_headers because
                 # self._start_response ignores Connection headers.
-                self.saved_headers = [(r'Connection', r'Close')]
+                self.saved_headers = [('Connection', 'Close')]
             self._write(b"Not Found")
             self._done()
             return
 
         env = {}
-        env[r'GATEWAY_INTERFACE'] = r'CGI/1.1'
-        env[r'REQUEST_METHOD'] = self.command
-        env[r'SERVER_NAME'] = self.server.server_name
-        env[r'SERVER_PORT'] = str(self.server.server_port)
-        env[r'REQUEST_URI'] = self.path
-        env[r'SCRIPT_NAME'] = pycompat.sysstr(self.server.prefix)
-        env[r'PATH_INFO'] = pycompat.sysstr(path[len(self.server.prefix) :])
-        env[r'REMOTE_HOST'] = self.client_address[0]
-        env[r'REMOTE_ADDR'] = self.client_address[0]
-        env[r'QUERY_STRING'] = query or r''
+        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+        env['REQUEST_METHOD'] = self.command
+        env['SERVER_NAME'] = self.server.server_name
+        env['SERVER_PORT'] = str(self.server.server_port)
+        env['REQUEST_URI'] = self.path
+        env['SCRIPT_NAME'] = pycompat.sysstr(self.server.prefix)
+        env['PATH_INFO'] = pycompat.sysstr(path[len(self.server.prefix) :])
+        env['REMOTE_HOST'] = self.client_address[0]
+        env['REMOTE_ADDR'] = self.client_address[0]
+        env['QUERY_STRING'] = query or ''
 
         if pycompat.ispy3:
             if self.headers.get_content_type() is None:
-                env[r'CONTENT_TYPE'] = self.headers.get_default_type()
+                env['CONTENT_TYPE'] = self.headers.get_default_type()
             else:
-                env[r'CONTENT_TYPE'] = self.headers.get_content_type()
-            length = self.headers.get(r'content-length')
+                env['CONTENT_TYPE'] = self.headers.get_content_type()
+            length = self.headers.get('content-length')
         else:
             if self.headers.typeheader is None:
-                env[r'CONTENT_TYPE'] = self.headers.type
+                env['CONTENT_TYPE'] = self.headers.type
             else:
-                env[r'CONTENT_TYPE'] = self.headers.typeheader
-            length = self.headers.getheader(r'content-length')
+                env['CONTENT_TYPE'] = self.headers.typeheader
+            length = self.headers.getheader('content-length')
         if length:
-            env[r'CONTENT_LENGTH'] = length
+            env['CONTENT_LENGTH'] = length
         for header in [
             h
             for h in self.headers.keys()
-            if h.lower() not in (r'content-type', r'content-length')
+            if h.lower() not in ('content-type', 'content-length')
         ]:
-            hkey = r'HTTP_' + header.replace(r'-', r'_').upper()
+            hkey = 'HTTP_' + header.replace('-', '_').upper()
             hval = self.headers.get(header)
-            hval = hval.replace(r'\n', r'').strip()
+            hval = hval.replace('\n', '').strip()
             if hval:
                 env[hkey] = hval
-        env[r'SERVER_PROTOCOL'] = self.request_version
-        env[r'wsgi.version'] = (1, 0)
-        env[r'wsgi.url_scheme'] = pycompat.sysstr(self.url_scheme)
-        if env.get(r'HTTP_EXPECT', b'').lower() == b'100-continue':
+        env['SERVER_PROTOCOL'] = self.request_version
+        env['wsgi.version'] = (1, 0)
+        env['wsgi.url_scheme'] = pycompat.sysstr(self.url_scheme)
+        if env.get('HTTP_EXPECT', b'').lower() == b'100-continue':
             self.rfile = common.continuereader(self.rfile, self.wfile.write)
 
-        env[r'wsgi.input'] = self.rfile
-        env[r'wsgi.errors'] = _error_logger(self)
-        env[r'wsgi.multithread'] = isinstance(
+        env['wsgi.input'] = self.rfile
+        env['wsgi.errors'] = _error_logger(self)
+        env['wsgi.multithread'] = isinstance(
             self.server, socketserver.ThreadingMixIn
         )
         if util.safehasattr(socketserver, b'ForkingMixIn'):
-            env[r'wsgi.multiprocess'] = isinstance(
+            env['wsgi.multiprocess'] = isinstance(
                 self.server, socketserver.ForkingMixIn
             )
         else:
-            env[r'wsgi.multiprocess'] = False
+            env['wsgi.multiprocess'] = False
 
-        env[r'wsgi.run_once'] = 0
+        env['wsgi.run_once'] = 0
 
         wsgiref.validate.check_environ(env)
 
@@ -251,17 +251,16 @@
         self._chunked = False
         for h in self.saved_headers:
             self.send_header(*h)
-            if h[0].lower() == r'content-length':
+            if h[0].lower() == 'content-length':
                 self.length = int(h[1])
         if self.length is None and saved_status[0] != common.HTTP_NOT_MODIFIED:
             self._chunked = (
-                not self.close_connection
-                and self.request_version == r'HTTP/1.1'
+                not self.close_connection and self.request_version == 'HTTP/1.1'
             )
             if self._chunked:
-                self.send_header(r'Transfer-Encoding', r'chunked')
+                self.send_header('Transfer-Encoding', 'chunked')
             else:
-                self.send_header(r'Connection', r'close')
+                self.send_header('Connection', 'close')
         self.end_headers()
         self.sent_headers = True
 
@@ -270,7 +269,7 @@
         code, msg = http_status.split(None, 1)
         code = int(code)
         self.saved_status = http_status
-        bad_headers = (r'connection', r'transfer-encoding')
+        bad_headers = ('connection', 'transfer-encoding')
         self.saved_headers = [
             h for h in headers if h[0].lower() not in bad_headers
         ]
@@ -335,8 +334,8 @@
 
     def setup(self):
         self.connection = self.request
-        self.rfile = self.request.makefile(r"rb", self.rbufsize)
-        self.wfile = self.request.makefile(r"wb", self.wbufsize)
+        self.rfile = self.request.makefile("rb", self.rbufsize)
+        self.wfile = self.request.makefile("wb", self.wbufsize)
 
 
 try:
--- a/mercurial/hgweb/webcommands.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hgweb/webcommands.py	Tue Jan 21 13:14:51 2020 -0500
@@ -1268,7 +1268,6 @@
     name = b"%s-%s" % (reponame, arch_version)
 
     ctx = webutil.changectx(web.repo, web.req)
-    pats = []
     match = scmutil.match(ctx, [])
     file = web.req.qsparams.get(b'file')
     if file:
--- a/mercurial/hgweb/webutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hgweb/webutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -541,8 +541,15 @@
 
 def _listfilesgen(context, ctx, stripecount):
     parity = paritygen(stripecount)
+    filesadded = ctx.filesadded()
     for blockno, f in enumerate(ctx.files()):
-        template = b'filenodelink' if f in ctx else b'filenolink'
+        if f not in ctx:
+            status = b'removed'
+        elif f in filesadded:
+            status = b'added'
+        else:
+            status = b'modified'
+        template = b'filenolink' if status == b'removed' else b'filenodelink'
         yield context.process(
             template,
             {
@@ -550,6 +557,7 @@
                 b'file': f,
                 b'blockno': blockno + 1,
                 b'parity': next(parity),
+                b'status': status,
             },
         )
 
--- a/mercurial/hgweb/wsgicgi.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hgweb/wsgicgi.py	Tue Jan 21 13:14:51 2020 -0500
@@ -25,28 +25,28 @@
     procutil.setbinary(procutil.stdout)
 
     environ = dict(pycompat.iteritems(os.environ))  # re-exports
-    environ.setdefault(r'PATH_INFO', b'')
-    if environ.get(r'SERVER_SOFTWARE', r'').startswith(r'Microsoft-IIS'):
+    environ.setdefault('PATH_INFO', b'')
+    if environ.get('SERVER_SOFTWARE', '').startswith('Microsoft-IIS'):
         # IIS includes script_name in PATH_INFO
-        scriptname = environ[r'SCRIPT_NAME']
-        if environ[r'PATH_INFO'].startswith(scriptname):
-            environ[r'PATH_INFO'] = environ[r'PATH_INFO'][len(scriptname) :]
+        scriptname = environ['SCRIPT_NAME']
+        if environ['PATH_INFO'].startswith(scriptname):
+            environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname) :]
 
     stdin = procutil.stdin
-    if environ.get(r'HTTP_EXPECT', r'').lower() == r'100-continue':
+    if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
         stdin = common.continuereader(stdin, procutil.stdout.write)
 
-    environ[r'wsgi.input'] = stdin
-    environ[r'wsgi.errors'] = procutil.stderr
-    environ[r'wsgi.version'] = (1, 0)
-    environ[r'wsgi.multithread'] = False
-    environ[r'wsgi.multiprocess'] = True
-    environ[r'wsgi.run_once'] = True
+    environ['wsgi.input'] = stdin
+    environ['wsgi.errors'] = procutil.stderr
+    environ['wsgi.version'] = (1, 0)
+    environ['wsgi.multithread'] = False
+    environ['wsgi.multiprocess'] = True
+    environ['wsgi.run_once'] = True
 
-    if environ.get(r'HTTPS', r'off').lower() in (r'on', r'1', r'yes'):
-        environ[r'wsgi.url_scheme'] = r'https'
+    if environ.get('HTTPS', 'off').lower() in ('on', '1', 'yes'):
+        environ['wsgi.url_scheme'] = 'https'
     else:
-        environ[r'wsgi.url_scheme'] = r'http'
+        environ['wsgi.url_scheme'] = 'http'
 
     headers_set = []
     headers_sent = []
@@ -77,7 +77,7 @@
                     # Re-raise original exception if headers sent
                     raise exc_info[0](exc_info[1], exc_info[2])
             finally:
-                exc_info = None  # avoid dangling circular ref
+                del exc_info  # avoid dangling circular ref
         elif headers_set:
             raise AssertionError(b"Headers already set!")
 
--- a/mercurial/hgweb/wsgiheaders.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hgweb/wsgiheaders.py	Tue Jan 21 13:14:51 2020 -0500
@@ -13,7 +13,7 @@
 
 import re
 
-tspecials = re.compile(br'[ \(\)<>@,;:\\"/\[\]\?=]')
+tspecials = re.compile(br'[ ()<>@,;:\\"/\[\]?=]')
 
 
 def _formatparam(param, value=None, quote=1):
@@ -129,7 +129,7 @@
         return self._headers[:]
 
     def __repr__(self):
-        return r"%s(%r)" % (self.__class__.__name__, self._headers)
+        return "%s(%r)" % (self.__class__.__name__, self._headers)
 
     def __str__(self):
         """str() returns the formatted headers, complete with end line,
--- a/mercurial/hook.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/hook.py	Tue Jan 21 13:14:51 2020 -0500
@@ -22,6 +22,7 @@
 )
 from .utils import (
     procutil,
+    resourceutil,
     stringutil,
 )
 
@@ -38,7 +39,7 @@
 
     if callable(funcname):
         obj = funcname
-        funcname = pycompat.sysbytes(obj.__module__ + r"." + obj.__name__)
+        funcname = pycompat.sysbytes(obj.__module__ + "." + obj.__name__)
     else:
         d = funcname.rfind(b'.')
         if d == -1:
@@ -48,7 +49,7 @@
             )
         modname = funcname[:d]
         oldpaths = sys.path
-        if procutil.mainfrozen():
+        if resourceutil.mainfrozen():
             # binary installs require sys.path manipulation
             modpath, modfile = os.path.split(modname)
             if modpath and modfile:
@@ -61,7 +62,7 @@
                 e1 = sys.exc_info()
                 try:
                     # extensions are loaded with hgext_ prefix
-                    obj = __import__(r"hgext_%s" % pycompat.sysstr(modname))
+                    obj = __import__("hgext_%s" % pycompat.sysstr(modname))
                 except (ImportError, SyntaxError):
                     e2 = sys.exc_info()
                     if ui.tracebackflag:
--- a/mercurial/httppeer.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/httppeer.py	Tue Jan 21 13:14:51 2020 -0500
@@ -63,7 +63,7 @@
     # and using an r-string to make it portable between Python 2 and 3
     # doesn't work because then the \r is a literal backslash-r
     # instead of a carriage return.
-    valuelen = limit - len(fmt % r'000') - len(b': \r\n')
+    valuelen = limit - len(fmt % '000') - len(b': \r\n')
     result = []
 
     n = 0
@@ -158,7 +158,7 @@
             argsio = io.BytesIO(strargs)
             argsio.length = len(strargs)
             data = _multifile(argsio, data)
-        headers[r'X-HgArgs-Post'] = len(strargs)
+        headers['X-HgArgs-Post'] = len(strargs)
     elif args:
         # Calling self.capable() can infinite loop if we are calling
         # "capabilities". But that command should never accept wire
@@ -187,8 +187,8 @@
         size = data.length
     elif data is not None:
         size = len(data)
-    if data is not None and r'Content-Type' not in headers:
-        headers[r'Content-Type'] = r'application/mercurial-0.1'
+    if data is not None and 'Content-Type' not in headers:
+        headers['Content-Type'] = 'application/mercurial-0.1'
 
     # Tell the server we accept application/mercurial-0.2 and multiple
     # compression formats if the server is capable of emitting those
@@ -228,17 +228,17 @@
 
     varyheaders = []
     for header in headers:
-        if header.lower().startswith(r'x-hg'):
+        if header.lower().startswith('x-hg'):
             varyheaders.append(header)
 
     if varyheaders:
-        headers[r'Vary'] = r','.join(sorted(varyheaders))
+        headers['Vary'] = ','.join(sorted(varyheaders))
 
     req = requestbuilder(pycompat.strurl(cu), data, headers)
 
     if data is not None:
         ui.debug(b"sending %d bytes\n" % size)
-        req.add_unredirected_header(r'Content-Length', r'%d' % size)
+        req.add_unredirected_header('Content-Length', '%d' % size)
 
     return req, cu, qs
 
@@ -348,9 +348,9 @@
             ui.warn(_(b'real URL is %s\n') % respurl)
 
     try:
-        proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
+        proto = pycompat.bytesurl(resp.getheader('content-type', ''))
     except AttributeError:
-        proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
+        proto = pycompat.bytesurl(resp.headers.get('content-type', ''))
 
     safeurl = util.hidepassword(baseurl)
     if proto.startswith(b'application/hg-error'):
@@ -517,7 +517,7 @@
 
         tempname = bundle2.writebundle(self.ui, cg, None, type)
         fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
-        headers = {r'Content-Type': r'application/mercurial-0.1'}
+        headers = {'Content-Type': 'application/mercurial-0.1'}
 
         try:
             r = self._call(cmd, data=fp, headers=headers, **args)
@@ -543,14 +543,14 @@
         try:
             # dump bundle to disk
             fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
-            with os.fdopen(fd, r"wb") as fh:
+            with os.fdopen(fd, "wb") as fh:
                 d = fp.read(4096)
                 while d:
                     fh.write(d)
                     d = fp.read(4096)
             # start http push
             with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
-                headers = {r'Content-Type': r'application/mercurial-0.1'}
+                headers = {'Content-Type': 'application/mercurial-0.1'}
                 return self._callstream(cmd, data=fp_, headers=headers, **args)
         finally:
             if filename is not None:
@@ -621,12 +621,12 @@
 
     # TODO modify user-agent to reflect v2
     headers = {
-        r'Accept': wireprotov2server.FRAMINGTYPE,
-        r'Content-Type': wireprotov2server.FRAMINGTYPE,
+        'Accept': wireprotov2server.FRAMINGTYPE,
+        'Content-Type': wireprotov2server.FRAMINGTYPE,
     }
 
     req = requestbuilder(pycompat.strurl(url), body, headers)
-    req.add_unredirected_header(r'Content-Length', r'%d' % len(body))
+    req.add_unredirected_header('Content-Length', '%d' % len(body))
 
     try:
         res = opener.open(req)
@@ -965,7 +965,7 @@
 
     if advertisev2:
         args[b'headers'] = {
-            r'X-HgProto-1': r'cbor',
+            'X-HgProto-1': 'cbor',
         }
 
         args[b'headers'].update(
--- a/mercurial/i18n.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/i18n.py	Tue Jan 21 13:14:51 2020 -0500
@@ -13,6 +13,7 @@
 import sys
 
 from .pycompat import getattr
+from .utils import resourceutil
 from . import (
     encoding,
     pycompat,
@@ -45,18 +46,14 @@
         # ctypes not found or unknown langid
         pass
 
-_ugettext = None
 
-
-def setdatapath(datapath):
-    datapath = pycompat.fsdecode(datapath)
-    localedir = os.path.join(datapath, r'locale')
-    t = gettextmod.translation(r'hg', localedir, _languages, fallback=True)
-    global _ugettext
-    try:
-        _ugettext = t.ugettext
-    except AttributeError:
-        _ugettext = t.gettext
+datapath = pycompat.fsdecode(resourceutil.datapath)
+localedir = os.path.join(datapath, 'locale')
+t = gettextmod.translation('hg', localedir, _languages, fallback=True)
+try:
+    _ugettext = t.ugettext
+except AttributeError:
+    _ugettext = t.gettext
 
 
 _msgcache = {}  # encoding: {message: translation}
--- a/mercurial/interfaces/dirstate.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/interfaces/dirstate.py	Tue Jan 21 13:14:51 2020 -0500
@@ -24,7 +24,7 @@
         """Return a list of files containing patterns to ignore."""
 
     def _ignorefileandline(f):
-        b"Given a file `f`, return the ignore file and line that ignores it."
+        """Given a file `f`, return the ignore file and line that ignores it."""
 
     _checklink = interfaceutil.Attribute("""Callable for checking symlinks.""")
     _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""")
--- a/mercurial/interfaces/repository.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/interfaces/repository.py	Tue Jan 21 13:14:51 2020 -0500
@@ -878,7 +878,9 @@
 
         If individual revisions cannot have their revision content resolved,
         the method is expected to set the ``skipread`` key to a set of nodes
-        that encountered problems.
+        that encountered problems.  If set, the method can also add the node(s)
+        to ``safe_renamed`` to indicate nodes whose rename checks can still be
+        performed with the currently accessible data.
 
         The method yields objects conforming to the ``iverifyproblem``
         interface.
@@ -1746,7 +1748,7 @@
     def currentwlock():
         """Return the wlock if it's held or None."""
 
-    def checkcommitpatterns(wctx, vdirs, match, status, fail):
+    def checkcommitpatterns(wctx, match, status, fail):
         pass
 
     def commit(
--- a/mercurial/keepalive.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/keepalive.py	Tue Jan 21 13:14:51 2020 -0500
@@ -331,9 +331,9 @@
         headers.update(sorted(req.unredirected_hdrs.items()))
         headers = util.sortdict((n.lower(), v) for n, v in headers.items())
         skipheaders = {}
-        for n in (r'host', r'accept-encoding'):
+        for n in ('host', 'accept-encoding'):
             if n in headers:
-                skipheaders[r'skip_' + n.replace(r'-', r'_')] = 1
+                skipheaders['skip_' + n.replace('-', '_')] = 1
         try:
             if urllibcompat.hasdata(req):
                 data = urllibcompat.getdata(req)
@@ -342,12 +342,12 @@
                     urllibcompat.getselector(req),
                     **skipheaders
                 )
-                if r'content-type' not in headers:
+                if 'content-type' not in headers:
                     h.putheader(
-                        r'Content-type', r'application/x-www-form-urlencoded'
+                        'Content-type', 'application/x-www-form-urlencoded'
                     )
-                if r'content-length' not in headers:
-                    h.putheader(r'Content-length', r'%d' % len(data))
+                if 'content-length' not in headers:
+                    h.putheader('Content-length', '%d' % len(data))
             else:
                 h.putrequest(
                     req.get_method(),
@@ -401,8 +401,8 @@
     def __init__(self, sock, debuglevel=0, strict=0, method=None):
         extrakw = {}
         if not pycompat.ispy3:
-            extrakw[r'strict'] = True
-            extrakw[r'buffering'] = True
+            extrakw['strict'] = True
+            extrakw['buffering'] = True
         httplib.HTTPResponse.__init__(
             self, sock, debuglevel=debuglevel, method=method, **extrakw
         )
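
(Sketch of the skip_* flags derived above; http.client's putrequest()
accepts these keywords, while the host and path here are hypothetical.)

    import http.client

    conn = http.client.HTTPConnection('example.com')
    # Suppress the automatic Host/Accept-Encoding headers because the
    # caller already supplied them, then send the caller's own values.
    conn.putrequest('POST', '/path', skip_host=1, skip_accept_encoding=1)
    conn.putheader('Host', 'example.com')
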
--- a/mercurial/linelog.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/linelog.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,7 +8,7 @@
 
 SCCS Weaves are an implementation of
 https://en.wikipedia.org/wiki/Interleaved_deltas. See
-mercurial/help/internals/linelog.txt for an exploration of SCCS weaves
+mercurial/helptext/internals/linelog.txt for an exploration of SCCS weaves
 and how linelog works in detail.
 
 Here's a hacker's summary: a linelog is a program which is executed in
@@ -53,7 +53,7 @@
         return iter(self.lines)
 
 
-class _llinstruction(object):
+class _llinstruction(object):  # pytype: disable=ignored-metaclass
 
     __metaclass__ = abc.ABCMeta
 
@@ -99,7 +99,7 @@
         self._target = op2
 
     def __str__(self):
-        return r'JGE %d %d' % (self._cmprev, self._target)
+        return 'JGE %d %d' % (self._cmprev, self._target)
 
     def __eq__(self, other):
         return (
@@ -126,7 +126,7 @@
         self._target = op2
 
     def __str__(self):
-        return r'JUMP %d' % (self._target)
+        return 'JUMP %d' % (self._target)
 
     def __eq__(self, other):
         return type(self) == type(other) and self._target == other._target
@@ -168,7 +168,7 @@
         self._target = op2
 
     def __str__(self):
-        return r'JL %d %d' % (self._cmprev, self._target)
+        return 'JL %d %d' % (self._cmprev, self._target)
 
     def __eq__(self, other):
         return (
@@ -196,7 +196,7 @@
         self._origlineno = op2
 
     def __str__(self):
-        return r'LINE %d %d' % (self._rev, self._origlineno)
+        return 'LINE %d %d' % (self._rev, self._origlineno)
 
     def __eq__(self, other):
         return (
@@ -262,7 +262,7 @@
         )
 
     def debugstr(self):
-        fmt = r'%%%dd %%s' % len(str(len(self._program)))
+        fmt = '%%%dd %%s' % len(str(len(self._program)))
         return pycompat.sysstr(b'\n').join(
             fmt % (idx, i) for idx, i in enumerate(self._program[1:], 1)
         )
@@ -278,8 +278,14 @@
         fakejge = _decodeone(buf, 0)
         if isinstance(fakejge, _jump):
             maxrev = 0
+        elif isinstance(fakejge, (_jge, _jl)):
+            maxrev = fakejge._cmprev
         else:
-            maxrev = fakejge._cmprev
+            raise LineLogError(
+                'Expected one of _jump, _jge, or _jl. Got %s.'
+                % type(fakejge).__name__
+            )
+        assert isinstance(fakejge, (_jump, _jge, _jl))  # help pytype
         numentries = fakejge._target
         if expected != numentries:
             raise LineLogError(
--- a/mercurial/localrepo.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/localrepo.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,7 +8,6 @@
 from __future__ import absolute_import
 
 import errno
-import hashlib
 import os
 import random
 import sys
@@ -74,6 +73,7 @@
 )
 
 from .utils import (
+    hashutil,
     procutil,
     stringutil,
 )
@@ -676,6 +676,8 @@
     configs are loaded. For example, an extension may wish to pull in
     configs from alternate files or sources.
     """
+    if b'HGRCSKIPREPO' in encoding.environ:
+        return False
     try:
         ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
         return True
@@ -927,6 +929,9 @@
     if repository.NARROW_REQUIREMENT in requirements:
         options[b'enableellipsis'] = True
 
+    if ui.configbool(b'experimental', b'rust.index'):
+        options[b'rust.index'] = True
+
     return options
 
 
@@ -1514,11 +1519,71 @@
         narrowspec.save(self, newincludes, newexcludes)
         self.invalidate(clearfilecache=True)
 
+    @unfilteredpropertycache
+    def _quick_access_changeid_null(self):
+        return {
+            b'null': (nullrev, nullid),
+            nullrev: (nullrev, nullid),
+            nullid: (nullrev, nullid),
+        }
+
+    @unfilteredpropertycache
+    def _quick_access_changeid_wc(self):
+        # also fast path access to the working copy parents
+        # however, only do it for filters that ensure the wc is visible.
+        quick = {}
+        cl = self.unfiltered().changelog
+        for node in self.dirstate.parents():
+            if node == nullid:
+                continue
+            rev = cl.index.get_rev(node)
+            if rev is None:
+                # unknown working copy parent case:
+                #
+                #   skip the fast path and let higher code deal with it
+                continue
+            pair = (rev, node)
+            quick[rev] = pair
+            quick[node] = pair
+            # also add the parents of the parents
+            for r in cl.parentrevs(rev):
+                if r == nullrev:
+                    continue
+                n = cl.node(r)
+                pair = (r, n)
+                quick[r] = pair
+                quick[n] = pair
+        p1node = self.dirstate.p1()
+        if p1node != nullid:
+            quick[b'.'] = quick[p1node]
+        return quick
+
+    @unfilteredmethod
+    def _quick_access_changeid_invalidate(self):
+        if '_quick_access_changeid_wc' in vars(self):
+            del self.__dict__['_quick_access_changeid_wc']
+
+    @property
+    def _quick_access_changeid(self):
+        """an helper dictionnary for __getitem__ calls
+
+        This contains a list of symbol we can recognise right away without
+        further processing.
+        """
+        mapping = self._quick_access_changeid_null
+        if self.filtername in repoview.filter_has_wc:
+            mapping = mapping.copy()
+            mapping.update(self._quick_access_changeid_wc)
+        return mapping
+
     def __getitem__(self, changeid):
+        # dealing with special cases
         if changeid is None:
             return context.workingctx(self)
         if isinstance(changeid, context.basectx):
             return changeid
+
+        # dealing with multiple revisions
         if isinstance(changeid, slice):
             # wdirrev isn't contiguous so the slice shouldn't include it
             return [
@@ -1526,16 +1591,22 @@
                 for i in pycompat.xrange(*changeid.indices(len(self)))
                 if i not in self.changelog.filteredrevs
             ]
+
+        # dealing with some special values
+        quick_access = self._quick_access_changeid.get(changeid)
+        if quick_access is not None:
+            rev, node = quick_access
+            return context.changectx(self, rev, node, maybe_filtered=False)
+        if changeid == b'tip':
+            node = self.changelog.tip()
+            rev = self.changelog.rev(node)
+            return context.changectx(self, rev, node)
+
+        # dealing with arbitrary values
         try:
             if isinstance(changeid, int):
                 node = self.changelog.node(changeid)
                 rev = changeid
-            elif changeid == b'null':
-                node = nullid
-                rev = nullrev
-            elif changeid == b'tip':
-                node = self.changelog.tip()
-                rev = self.changelog.rev(node)
             elif changeid == b'.':
                 # this is a hack to delay/avoid loading obsmarkers
                 # when we know that '.' won't be hidden
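
(Illustrative sketch of the fast path added above, assuming a live repo
object: common changeids resolve through the precomputed mapping before
any revset or obsmarker machinery runs.)

    quick = repo._quick_access_changeid   # {symbol/rev/node: (rev, node)}
    hit = quick.get(b'.')
    if hit is not None:
        rev, node = hit   # working-copy parent, no parsing needed
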
@@ -1619,7 +1690,7 @@
         user aliases, consider calling ``scmutil.revrange()`` or
         ``repo.anyrevs([expr], user=True)``.
 
-        Returns a revset.abstractsmartset, which is a list-like interface
+        Returns a smartset.abstractsmartset, which is a list-like interface
         that contains integer revisions.
         '''
         tree = revsetlang.spectree(expr, *args)
@@ -1645,6 +1716,12 @@
         definitions overriding user aliases, set ``localalias`` to
         ``{name: definitionstring}``.
         '''
+        if specs == [b'null']:
+            return revset.baseset([nullrev])
+        if specs == [b'.']:
+            quick_data = self._quick_access_changeid.get(b'.')
+            if quick_data is not None:
+                return revset.baseset([quick_data[0]])
         if user:
             m = revset.matchany(
                 self.ui,
@@ -1823,11 +1900,11 @@
 
     def known(self, nodes):
         cl = self.changelog
-        nm = cl.nodemap
+        get_rev = cl.index.get_rev
         filtered = cl.filteredrevs
         result = []
         for n in nodes:
-            r = nm.get(n)
+            r = get_rev(n)
             resp = not (r is None or r in filtered)
             result.append(resp)
         return result
@@ -1859,20 +1936,8 @@
         return self.vfs.reljoin(self.root, f, *insidef)
 
     def setparents(self, p1, p2=nullid):
-        with self.dirstate.parentchange():
-            copies = self.dirstate.setparents(p1, p2)
-            pctx = self[p1]
-            if copies:
-                # Adjust copy records, the dirstate cannot do it, it
-                # requires access to parents manifests. Preserve them
-                # only for entries added to first parent.
-                for f in copies:
-                    if f not in pctx and copies[f] in pctx:
-                        self.dirstate.copy(copies[f], f)
-            if p2 == nullid:
-                for f, s in sorted(self.dirstate.copies().items()):
-                    if f not in pctx and s not in pctx:
-                        self.dirstate.copy(None, f)
+        self[None].setparents(p1, p2)
+        self._quick_access_changeid_invalidate()
 
     def filectx(self, path, changeid=None, fileid=None, changectx=None):
         """changeid must be a changeset revision, if specified.
@@ -1993,7 +2058,7 @@
             )
 
         idbase = b"%.40f#%f" % (random.random(), time.time())
-        ha = hex(hashlib.sha1(idbase).digest())
+        ha = hex(hashutil.sha1(idbase).digest())
         txnid = b'TXN:' + ha
         self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
 
@@ -2179,7 +2244,7 @@
             # fixes the function accumulation.
             hookargs = tr2.hookargs
 
-            def hookfunc():
+            def hookfunc(unused_success):
                 repo = reporef()
                 if hook.hashook(repo.ui, b'txnclose-bookmark'):
                     bmchanges = sorted(tr.changes[b'bookmarks'].items())
@@ -2350,7 +2415,8 @@
             self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
         self.invalidate()
 
-        parentgone = any(p not in self.changelog.nodemap for p in parents)
+        has_node = self.changelog.index.has_node
+        parentgone = any(not has_node(p) for p in parents)
         if parentgone:
             # prevent dirstateguard from overwriting already restored one
             dsguard.close()
@@ -2458,9 +2524,9 @@
 
     def invalidatecaches(self):
 
-        if r'_tagscache' in vars(self):
+        if '_tagscache' in vars(self):
             # can't use delattr on proxy
-            del self.__dict__[r'_tagscache']
+            del self.__dict__['_tagscache']
 
         self._branchcaches.clear()
         self.invalidatevolatilesets()
@@ -2469,6 +2535,7 @@
     def invalidatevolatilesets(self):
         self.filteredrevcache.clear()
         obsolete.clearobscaches(self)
+        self._quick_access_changeid_invalidate()
 
     def invalidatedirstate(self):
         '''Invalidates the dirstate, causing the next call to dirstate
@@ -2479,13 +2546,13 @@
         rereads the dirstate. Use dirstate.invalidate() if you want to
         explicitly read the dirstate again (i.e. restoring it to a previous
         known good state).'''
-        if hasunfilteredcache(self, r'dirstate'):
+        if hasunfilteredcache(self, 'dirstate'):
             for k in self.dirstate._filecache:
                 try:
                     delattr(self.dirstate, k)
                 except AttributeError:
                     pass
-            delattr(self.unfiltered(), r'dirstate')
+            delattr(self.unfiltered(), 'dirstate')
 
     def invalidate(self, clearfilecache=False):
         '''Invalidates both store and non-store parts other than dirstate
@@ -2535,7 +2602,7 @@
         """Reload stats of cached files so that they are flagged as valid"""
         for k, ce in self._filecache.items():
             k = pycompat.sysstr(k)
-            if k == r'dirstate' or k not in self.__dict__:
+            if k == 'dirstate' or k not in self.__dict__:
                 continue
             ce.refresh()
 
@@ -2590,7 +2657,7 @@
                 l.postrelease.append(callback)
                 break
         else:  # no lock has been found.
-            callback()
+            callback(True)
 
     def lock(self, wait=True):
         '''Lock the repository store (.hg/store) and return a weak reference
@@ -2787,7 +2854,7 @@
 
         return fparent1
 
-    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
+    def checkcommitpatterns(self, wctx, match, status, fail):
         """check for commit arguments that aren't committable"""
         if match.isexact() or match.prefix():
             matched = set(status.modified + status.added + status.removed)
@@ -2798,7 +2865,8 @@
                     continue
                 if f in status.deleted:
                     fail(f, _(b'file not found!'))
-                if f in vdirs:  # visited directory
+                # Is it a directory that exists or used to exist?
+                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                     d = f + b'/'
                     for mf in matched:
                         if mf.startswith(d):
@@ -2816,7 +2884,7 @@
         date=None,
         match=None,
         force=False,
-        editor=False,
+        editor=None,
         extra=None,
     ):
         """Add a new revision to current repository.
@@ -2835,8 +2903,6 @@
             match = matchmod.always()
 
         if not force:
-            vdirs = []
-            match.explicitdir = vdirs.append
             match.bad = fail
 
         # lock() for recent changelog (see issue4368)
@@ -2865,7 +2931,7 @@
 
             # make sure all explicit patterns are matched
             if not force:
-                self.checkcommitpatterns(wctx, vdirs, match, status, fail)
+                self.checkcommitpatterns(wctx, match, status, fail)
 
             cctx = context.workingcommitctx(
                 self, status, text, user, date, extra
@@ -2929,7 +2995,7 @@
                     )
                 raise
 
-        def commithook():
+        def commithook(unused_success):
             # hack for command that use a temporary commit (eg: histedit)
             # temporary commit got stripped before hook release
             if self.changelog.hasnode(ret):
@@ -3362,10 +3428,10 @@
             if tr is not None:
                 hookargs.update(tr.hookargs)
             hookargs = pycompat.strkwargs(hookargs)
-            hookargs[r'namespace'] = namespace
-            hookargs[r'key'] = key
-            hookargs[r'old'] = old
-            hookargs[r'new'] = new
+            hookargs['namespace'] = namespace
+            hookargs['key'] = key
+            hookargs['old'] = old
+            hookargs['new'] = new
             self.hook(b'prepushkey', throw=True, **hookargs)
         except error.HookAbort as exc:
             self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
@@ -3375,7 +3441,7 @@
         self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
         ret = pushkey.push(self, namespace, key, old, new)
 
-        def runhook():
+        def runhook(unused_success):
             self.hook(
                 b'pushkey',
                 namespace=namespace,
@@ -3705,7 +3771,7 @@
     # of repos call close() on repo references.
     class poisonedrepository(object):
         def __getattribute__(self, item):
-            if item == r'close':
+            if item == 'close':
                 return object.__getattribute__(self, item)
 
             raise error.ProgrammingError(
@@ -3717,4 +3783,4 @@
 
     # We may have a repoview, which intercepts __setattr__. So be sure
     # we operate at the lowest level possible.
-    object.__setattr__(repo, r'__class__', poisonedrepository)
+    object.__setattr__(repo, '__class__', poisonedrepository)
--- a/mercurial/lock.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/lock.py	Tue Jan 21 13:14:51 2020 -0500
@@ -233,12 +233,13 @@
         return self
 
     def __exit__(self, exc_type, exc_value, exc_tb):
-        self.release()
+        success = all(a is None for a in (exc_type, exc_value, exc_tb))
+        self.release(success=success)
 
     def __del__(self):
         if self.held:
             warnings.warn(
-                r"use lock.release instead of del lock",
+                "use lock.release instead of del lock",
                 category=DeprecationWarning,
                 stacklevel=2,
             )
@@ -330,27 +331,35 @@
                 return None
             raise
 
-    def _testlock(self, locker):
+    def _lockshouldbebroken(self, locker):
         if locker is None:
-            return None
+            return False
         try:
             host, pid = locker.split(b":", 1)
         except ValueError:
-            return locker
+            return False
         if host != lock._host:
-            return locker
+            return False
         try:
             pid = int(pid)
         except ValueError:
-            return locker
+            return False
         if procutil.testpid(pid):
+            return False
+        return True
+
+    def _testlock(self, locker):
+        if not self._lockshouldbebroken(locker):
             return locker
+
         # if locker dead, break lock.  must do this with another lock
         # held, or can race and break valid lock.
         try:
-            l = lock(self.vfs, self.f + b'.break', timeout=0)
-            self.vfs.unlink(self.f)
-            l.release()
+            with lock(self.vfs, self.f + b'.break', timeout=0):
+                locker = self._readlock()
+                if not self._lockshouldbebroken(locker):
+                    return locker
+                self.vfs.unlink(self.f)
         except error.LockError:
             return locker
 
@@ -400,7 +409,7 @@
                 self.acquirefn()
             self._inherited = False
 
-    def release(self):
+    def release(self, success=True):
         """release the lock and execute callback function if any
 
         If the lock has been acquired multiple times, the actual release is
@@ -425,7 +434,7 @@
             # at all.
             if not self._parentheld:
                 for callback in self.postrelease:
-                    callback()
+                    callback(success)
                 # Prevent double usage and help clear cycles.
                 self.postrelease = None
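
(Sketch of the new callback contract: postrelease callbacks now receive
one boolean saying whether the locked section succeeded; `l' is a held
lock object and flush_caches() is hypothetical.)

    def on_release(success):
        if not success:
            return        # repository state unknown; skip the work
        flush_caches()

    l.postrelease.append(on_release)
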
 
--- a/mercurial/logcmdutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/logcmdutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -42,6 +42,17 @@
 )
 
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Any,
+        Optional,
+        Tuple,
+    )
+
+    for t in (Any, Optional, Tuple):
+        assert t
+
+
 def getlimit(opts):
     """get the log limit according to option -l/--limit"""
     limit = opts.get(b'limit')
@@ -339,8 +350,11 @@
         self._exthook(ctx)
 
         if self.ui.debugflag:
-            files = ctx.p1().status(ctx)[:3]
-            for key, value in zip([b'files', b'files+', b'files-'], files):
+            files = ctx.p1().status(ctx)
+            for key, value in zip(
+                [b'files', b'files+', b'files-'],
+                [files.modified, files.added, files.removed],
+            ):
                 if value:
                     self.ui.write(
                         columns[key] % b" ".join(value),
@@ -470,9 +484,9 @@
         ):
             files = ctx.p1().status(ctx)
             fm.data(
-                modified=fm.formatlist(files[0], name=b'file'),
-                added=fm.formatlist(files[1], name=b'file'),
-                removed=fm.formatlist(files[2], name=b'file'),
+                modified=fm.formatlist(files.modified, name=b'file'),
+                added=fm.formatlist(files.added, name=b'file'),
+                removed=fm.formatlist(files.removed, name=b'file'),
             )
 
         verbose = not self.ui.debugflag and self.ui.verbose
@@ -584,6 +598,7 @@
         # write changeset metadata, then patch if requested
         key = self._parts[self._tref]
         self.ui.write(self.t.render(key, props))
+        self._exthook(ctx)
         self._showpatch(ctx, graphwidth)
 
         if self._parts[b'footer']:
@@ -840,6 +855,7 @@
 
 
 def getrevs(repo, pats, opts):
+    # type: (Any, Any, Any) -> Tuple[smartset.abstractsmartset, Optional[changesetdiffer]]
     """Return (revs, differ) where revs is a smartset
 
     differ is a changesetdiffer with pre-configured file matcher.
@@ -970,7 +986,7 @@
     differ = changesetdiffer()
     differ._makefilematcher = filematcher
     differ._makehunksfilter = hunksfilter
-    return revs, differ
+    return smartset.baseset(revs), differ
 
 
 def _graphnodeformatter(ui, displayer):
@@ -999,7 +1015,7 @@
     props = props or {}
     formatnode = _graphnodeformatter(ui, displayer)
     state = graphmod.asciistate()
-    styles = state[b'styles']
+    styles = state.styles
 
     # only set graph styling if HGPLAIN is not set.
     if ui.plain(b'graph'):
@@ -1020,7 +1036,7 @@
                 styles[key] = None
 
         # experimental config: experimental.graphshorten
-        state[b'graphshorten'] = ui.configbool(b'experimental', b'graphshorten')
+        state.graphshorten = ui.configbool(b'experimental', b'graphshorten')
 
     for rev, type, ctx, parents in dag:
         char = formatnode(repo, ctx)
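
(Sketch of the Python-2-compatible comment annotation style adopted
above: the types live in comments so the runtime never sees them, and
the TYPE_CHECKING-guarded imports keep pyflakes quiet.)

    from typing import Any, Optional  # only the type checker needs these

    def getlimit(opts):
        # type: (Any) -> Optional[int]
        ...
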
--- a/mercurial/lsprof.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/lsprof.py	Tue Jan 21 13:14:51 2020 -0500
@@ -31,7 +31,7 @@
     def __init__(self, data):
         self.data = data
 
-    def sort(self, crit=r"inlinetime"):
+    def sort(self, crit="inlinetime"):
         """XXX docstring"""
         # profiler_entries isn't defined when running under PyPy.
         if profiler_entry:
@@ -135,9 +135,9 @@
                 mname = _fn2mod[code.co_filename] = k
                 break
         else:
-            mname = _fn2mod[code.co_filename] = r'<%s>' % code.co_filename
+            mname = _fn2mod[code.co_filename] = '<%s>' % code.co_filename
 
-    res = r'%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
+    res = '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
 
     if sys.version_info.major >= 3:
         res = res.encode('latin-1')
--- a/mercurial/mail.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/mail.py	Tue Jan 21 13:14:51 2020 -0500
@@ -36,6 +36,12 @@
     stringutil,
 )
 
+if pycompat.TYPE_CHECKING:
+    from typing import Any, List, Tuple, Union
+
+    # keep pyflakes happy
+    assert all((Any, List, Tuple, Union))
+
 
 class STARTTLS(smtplib.SMTP):
     '''Derived class to verify the peer certificate for STARTTLS.
@@ -94,11 +100,12 @@
             ui=self._ui,
             serverhostname=self._host,
         )
-        self.file = new_socket.makefile(r'rb')
+        self.file = new_socket.makefile('rb')
         return new_socket
 
 
 def _pyhastls():
+    # type: () -> bool
     """Returns true iff Python has TLS support, false otherwise."""
     try:
         import ssl
@@ -190,7 +197,7 @@
         raise error.Abort(
             b'%s %s'
             % (
-                os.path.basename(program.split(None, 1)[0]),
+                os.path.basename(procutil.shellsplit(program)[0]),
                 procutil.explainexit(ret),
             )
         )
@@ -201,7 +208,7 @@
     fp = open(mbox, b'ab+')
     # Should be time.asctime(), but Windows prints 2-characters day
     # of month instead of one. Make them print the same thing.
-    date = time.strftime(r'%a %b %d %H:%M:%S %Y', time.localtime())
+    date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
     fp.write(
         b'From %s %s\n'
         % (encoding.strtolocal(sender), encoding.strtolocal(date))
@@ -246,42 +253,50 @@
 
 
 def codec2iana(cs):
+    # type: (str) -> str
     ''''''
-    cs = pycompat.sysbytes(email.charset.Charset(cs).input_charset.lower())
+    cs = email.charset.Charset(cs).input_charset.lower()
 
     # "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1"
-    if cs.startswith(b"iso") and not cs.startswith(b"iso-"):
-        return b"iso-" + cs[3:]
+    if cs.startswith("iso") and not cs.startswith("iso-"):
+        return "iso-" + cs[3:]
     return cs
 
 
-def mimetextpatch(s, subtype=b'plain', display=False):
+def mimetextpatch(s, subtype='plain', display=False):
+    # type: (bytes, str, bool) -> email.message.Message
     '''Return MIME message suitable for a patch.
     Charset will be detected by first trying to decode as us-ascii, then utf-8,
     and finally the global encodings. If all those fail, fall back to
     ISO-8859-1, an encoding that allows all byte sequences.
     Transfer encodings will be used if necessary.'''
 
-    cs = [b'us-ascii', b'utf-8', encoding.encoding, encoding.fallbackencoding]
+    cs = [
+        'us-ascii',
+        'utf-8',
+        pycompat.sysstr(encoding.encoding),
+        pycompat.sysstr(encoding.fallbackencoding),
+    ]
     if display:
-        cs = [b'us-ascii']
+        cs = ['us-ascii']
     for charset in cs:
         try:
-            s.decode(pycompat.sysstr(charset))
+            s.decode(charset)
             return mimetextqp(s, subtype, codec2iana(charset))
         except UnicodeDecodeError:
             pass
 
-    return mimetextqp(s, subtype, b"iso-8859-1")
+    return mimetextqp(s, subtype, "iso-8859-1")
 
 
 def mimetextqp(body, subtype, charset):
+    # type: (bytes, str, str) -> email.message.Message
     '''Return MIME message.
     Quoted-printable transfer encoding will be used if necessary.
     '''
     cs = email.charset.Charset(charset)
     msg = email.message.Message()
-    msg.set_type(pycompat.sysstr(b'text/' + subtype))
+    msg.set_type('text/' + subtype)
 
     for line in body.splitlines():
         if len(line) > 950:
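
(Sketch of codec2iana()'s normalization, following its own comment: the
email package maps "latin1" to "iso8859-1", which the function rewrites
to the standard hyphenated form.)

    codec2iana('latin1')   # -> 'iso-8859-1'
    codec2iana('utf-8')    # -> 'utf-8'
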
@@ -301,20 +316,25 @@
 
 
 def _charsets(ui):
+    # type: (Any) -> List[str]
     '''Obtains charsets to send mail parts not containing patches.'''
-    charsets = [cs.lower() for cs in ui.configlist(b'email', b'charsets')]
+    charsets = [
+        pycompat.sysstr(cs.lower())
+        for cs in ui.configlist(b'email', b'charsets')
+    ]
     fallbacks = [
-        encoding.fallbackencoding.lower(),
-        encoding.encoding.lower(),
-        b'utf-8',
+        pycompat.sysstr(encoding.fallbackencoding.lower()),
+        pycompat.sysstr(encoding.encoding.lower()),
+        'utf-8',
     ]
     for cs in fallbacks:  # find unique charsets while keeping order
         if cs not in charsets:
             charsets.append(cs)
-    return [cs for cs in charsets if not cs.endswith(b'ascii')]
+    return [cs for cs in charsets if not cs.endswith('ascii')]
 
 
 def _encode(ui, s, charsets):
+    # type: (Any, bytes, List[str]) -> Tuple[bytes, str]
     '''Returns (converted) string, charset tuple.
     Finds out best charset by cycling through sendcharsets in descending
     order. Tries both encoding and fallbackencoding for input. Only as
@@ -327,14 +347,17 @@
         # wants, and fall back to garbage-in-ascii.
         for ocs in sendcharsets:
             try:
-                return s.encode(pycompat.sysstr(ocs)), ocs
+                return s.encode(ocs), ocs
             except UnicodeEncodeError:
                 pass
             except LookupError:
-                ui.warn(_(b'ignoring invalid sendcharset: %s\n') % ocs)
+                ui.warn(
+                    _(b'ignoring invalid sendcharset: %s\n')
+                    % pycompat.sysbytes(ocs)
+                )
         else:
             # Everything failed, ascii-armor what we've got and send it.
-            return s.encode('ascii', 'backslashreplace')
+            return s.encode('ascii', 'backslashreplace'), 'us-ascii'
     # We have a bytes of unknown encoding. We'll try and guess a valid
     # encoding, falling back to pretending we had ascii even though we
     # know that's wrong.
@@ -349,27 +372,32 @@
                 continue
             for ocs in sendcharsets:
                 try:
-                    return u.encode(pycompat.sysstr(ocs)), ocs
+                    return u.encode(ocs), ocs
                 except UnicodeEncodeError:
                     pass
                 except LookupError:
-                    ui.warn(_(b'ignoring invalid sendcharset: %s\n') % ocs)
+                    ui.warn(
+                        _(b'ignoring invalid sendcharset: %s\n')
+                        % pycompat.sysbytes(ocs)
+                    )
     # if ascii, or all conversion attempts fail, send (broken) ascii
-    return s, b'us-ascii'
+    return s, 'us-ascii'
 
 
 def headencode(ui, s, charsets=None, display=False):
+    # type: (Any, Union[bytes, str], List[str], bool) -> str
     '''Returns RFC-2047 compliant header from given string.'''
     if not display:
         # split into words?
         s, cs = _encode(ui, s, charsets)
-        return encoding.strtolocal(email.header.Header(s, cs).encode())
-    return s
+        return email.header.Header(s, cs).encode()
+    return encoding.strfromlocal(s)
 
 
 def _addressencode(ui, name, addr, charsets=None):
-    assert isinstance(addr, bytes)
-    name = encoding.strfromlocal(headencode(ui, name, charsets))
+    # type: (Any, str, str, List[str]) -> str
+    addr = encoding.strtolocal(addr)
+    name = headencode(ui, name, charsets)
     try:
         acc, dom = addr.split(b'@')
         acc.decode('ascii')
@@ -383,45 +411,46 @@
             addr.decode('ascii')
         except UnicodeDecodeError:
             raise error.Abort(_(b'invalid local address: %s') % addr)
-    return pycompat.bytesurl(
-        email.utils.formataddr((name, encoding.strfromlocal(addr)))
-    )
+    return email.utils.formataddr((name, encoding.strfromlocal(addr)))
 
 
 def addressencode(ui, address, charsets=None, display=False):
+    # type: (Any, bytes, List[str], bool) -> str
     '''Turns address into RFC-2047 compliant header.'''
     if display or not address:
-        return address or b''
+        return encoding.strfromlocal(address or b'')
     name, addr = email.utils.parseaddr(encoding.strfromlocal(address))
-    return _addressencode(ui, name, encoding.strtolocal(addr), charsets)
+    return _addressencode(ui, name, addr, charsets)
 
 
 def addrlistencode(ui, addrs, charsets=None, display=False):
+    # type: (Any, List[bytes], List[str], bool) -> List[str]
     '''Turns a list of addresses into a list of RFC-2047 compliant headers.
     A single element of the input list may contain multiple addresses, but
     the output always has one address per item'''
+    straddrs = []
     for a in addrs:
-        assert isinstance(a, bytes), r'%r unexpectedly not a bytestr' % a
+        assert isinstance(a, bytes), '%r unexpectedly not a bytestr' % a
+        straddrs.append(encoding.strfromlocal(a))
     if display:
-        return [a.strip() for a in addrs if a.strip()]
+        return [a.strip() for a in straddrs if a.strip()]
 
     result = []
-    for name, addr in email.utils.getaddresses(
-        [encoding.strfromlocal(a) for a in addrs]
-    ):
+    for name, addr in email.utils.getaddresses(straddrs):
         if name or addr:
-            r = _addressencode(ui, name, encoding.strtolocal(addr), charsets)
+            r = _addressencode(ui, name, addr, charsets)
             result.append(r)
     return result
 
 
 def mimeencode(ui, s, charsets=None, display=False):
+    # type: (Any, bytes, List[str], bool) -> email.message.Message
     '''creates mime text object, encodes it if needed, and sets
     charset and transfer-encoding accordingly.'''
-    cs = b'us-ascii'
+    cs = 'us-ascii'
     if not display:
         s, cs = _encode(ui, s, charsets)
-    return mimetextqp(s, b'plain', cs)
+    return mimetextqp(s, 'plain', cs)
 
 
 if pycompat.ispy3:
@@ -429,12 +458,13 @@
     Generator = email.generator.BytesGenerator
 
     def parse(fp):
+        # type: (Any) -> email.message.Message
         ep = email.parser.Parser()
         # disable the "universal newlines" mode, which isn't binary safe.
         # I have no idea if ascii/surrogateescape is correct, but that's
         # what the standard Python email parser does.
         fp = io.TextIOWrapper(
-            fp, encoding=r'ascii', errors=r'surrogateescape', newline=chr(10)
+            fp, encoding='ascii', errors='surrogateescape', newline=chr(10)
         )
         try:
             return ep.parse(fp)
@@ -442,6 +472,7 @@
             fp.detach()
 
     def parsebytes(data):
+        # type: (bytes) -> email.message.Message
         ep = email.parser.BytesParser()
         return ep.parsebytes(data)
 
@@ -451,15 +482,18 @@
     Generator = email.generator.Generator
 
     def parse(fp):
+        # type: (Any) -> email.message.Message
         ep = email.parser.Parser()
         return ep.parse(fp)
 
     def parsebytes(data):
+        # type: (str) -> email.message.Message
         ep = email.parser.Parser()
         return ep.parsestr(data)
 
 
 def headdecode(s):
+    # type: (Union[email.header.Header, bytes]) -> bytes
     '''Decodes RFC-2047 header'''
     uparts = []
     for part, charset in email.header.decode_header(s):
--- a/mercurial/manifest.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/manifest.py	Tue Jan 21 13:14:51 2020 -0500
@@ -23,6 +23,7 @@
 from . import (
     error,
     mdiff,
+    pathutil,
     policy,
     pycompat,
     revlog,
@@ -33,7 +34,7 @@
     util as interfaceutil,
 )
 
-parsers = policy.importmod(r'parsers')
+parsers = policy.importmod('parsers')
 propertycache = util.propertycache
 
 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
@@ -494,7 +495,7 @@
 
     @propertycache
     def _dirs(self):
-        return util.dirs(self)
+        return pathutil.dirs(self)
 
     def dirs(self):
         return self._dirs
@@ -1104,7 +1105,7 @@
 
     @propertycache
     def _alldirs(self):
-        return util.dirs(self)
+        return pathutil.dirs(self)
 
     def dirs(self):
         return self._alldirs
@@ -1571,7 +1572,11 @@
         reporef = weakref.ref(repo)
         manifestrevlogref = weakref.ref(self)
 
-        def persistmanifestcache():
+        def persistmanifestcache(success):
+            # Repo is in an unknown state, do not persist.
+            if not success:
+                return
+
             repo = reporef()
             self = manifestrevlogref()
             if repo is None or self is None:
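
(Sketch of the weakref pattern above: the deferred callback holds only
weak references, so queuing it on lock.postrelease cannot keep the repo
alive; `repo' is assumed to be a live object.)

    import weakref

    reporef = weakref.ref(repo)

    def persist(success):
        repo = reporef()
        if repo is None or not success:
            return   # repo gone, or state unknown: skip persisting
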
--- a/mercurial/match.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/match.py	Tue Jan 21 13:14:51 2020 -0500
@@ -24,7 +24,7 @@
 )
 from .utils import stringutil
 
-rustmod = policy.importrust(r'filepatterns')
+rustmod = policy.importrust('filepatterns')
 
 allpatternkinds = (
     b're',
@@ -57,7 +57,7 @@
         return m.match
 
 
-def _expandsets(kindpats, ctx=None, listsubrepos=False, badfn=None):
+def _expandsets(cwd, kindpats, ctx=None, listsubrepos=False, badfn=None):
     '''Returns the kindpats list with the 'set' patterns expanded to matchers'''
     matchers = []
     other = []
@@ -68,11 +68,11 @@
                 raise error.ProgrammingError(
                     b"fileset expression with no context"
                 )
-            matchers.append(ctx.matchfileset(pat, badfn=badfn))
+            matchers.append(ctx.matchfileset(cwd, pat, badfn=badfn))
 
             if listsubrepos:
                 for subpath in ctx.substate:
-                    sm = ctx.sub(subpath).matchfileset(pat, badfn=badfn)
+                    sm = ctx.sub(subpath).matchfileset(cwd, pat, badfn=badfn)
                     pm = prefixdirmatcher(subpath, sm, badfn=badfn)
                     matchers.append(pm)
 
@@ -117,11 +117,11 @@
 
 
 def _buildkindpatsmatcher(
-    matchercls, root, kindpats, ctx=None, listsubrepos=False, badfn=None
+    matchercls, root, cwd, kindpats, ctx=None, listsubrepos=False, badfn=None,
 ):
     matchers = []
     fms, kindpats = _expandsets(
-        kindpats, ctx=ctx, listsubrepos=listsubrepos, badfn=badfn
+        cwd, kindpats, ctx=ctx, listsubrepos=listsubrepos, badfn=badfn,
     )
     if kindpats:
         m = matchercls(root, kindpats, badfn=badfn)
@@ -182,35 +182,38 @@
                           the same directory
     '<something>' - a pattern of the specified default type
 
+    >>> def _match(root, *args, **kwargs):
+    ...     return match(util.localpath(root), *args, **kwargs)
+
     Usually a patternmatcher is returned:
-    >>> match(b'foo', b'.', [b're:.*\.c$', b'path:foo/a', b'*.py'])
+    >>> _match(b'/foo', b'.', [b're:.*\.c$', b'path:foo/a', b'*.py'])
     <patternmatcher patterns='.*\\.c$|foo/a(?:/|$)|[^/]*\\.py$'>
 
     Combining 'patterns' with 'include' (resp. 'exclude') gives an
     intersectionmatcher (resp. a differencematcher):
-    >>> type(match(b'foo', b'.', [b're:.*\.c$'], include=[b'path:lib']))
+    >>> type(_match(b'/foo', b'.', [b're:.*\.c$'], include=[b'path:lib']))
     <class 'mercurial.match.intersectionmatcher'>
-    >>> type(match(b'foo', b'.', [b're:.*\.c$'], exclude=[b'path:build']))
+    >>> type(_match(b'/foo', b'.', [b're:.*\.c$'], exclude=[b'path:build']))
     <class 'mercurial.match.differencematcher'>
 
     Notice that, if 'patterns' is empty, an alwaysmatcher is returned:
-    >>> match(b'foo', b'.', [])
+    >>> _match(b'/foo', b'.', [])
     <alwaysmatcher>
 
     The 'default' argument determines which kind of pattern is assumed if a
     pattern has no prefix:
-    >>> match(b'foo', b'.', [b'.*\.c$'], default=b're')
+    >>> _match(b'/foo', b'.', [b'.*\.c$'], default=b're')
     <patternmatcher patterns='.*\\.c$'>
-    >>> match(b'foo', b'.', [b'main.py'], default=b'relpath')
+    >>> _match(b'/foo', b'.', [b'main.py'], default=b'relpath')
     <patternmatcher patterns='main\\.py(?:/|$)'>
-    >>> match(b'foo', b'.', [b'main.py'], default=b're')
+    >>> _match(b'/foo', b'.', [b'main.py'], default=b're')
     <patternmatcher patterns='main.py'>
 
     The primary use of matchers is to check whether a value (usually a file
     name) matches against one of the patterns given at initialization. There
     are two ways of doing this check.
 
-    >>> m = match(b'foo', b'', [b're:.*\.c$', b'relpath:a'])
+    >>> m = _match(b'/foo', b'', [b're:.*\.c$', b'relpath:a'])
 
     1. Calling the matcher with a file name returns True if any pattern
     matches that file name:
@@ -228,6 +231,8 @@
     >>> m.exact(b'main.c')
     False
     """
+    assert os.path.isabs(root)
+    cwd = os.path.join(root, util.localpath(cwd))
     normalize = _donormalize
     if icasefs:
         dirstate = ctx.repo().dirstate
@@ -256,6 +261,7 @@
             m = _buildkindpatsmatcher(
                 patternmatcher,
                 root,
+                cwd,
                 kindpats,
                 ctx=ctx,
                 listsubrepos=listsubrepos,
@@ -271,6 +277,7 @@
         im = _buildkindpatsmatcher(
             includematcher,
             root,
+            cwd,
             kindpats,
             ctx=ctx,
             listsubrepos=listsubrepos,
@@ -282,6 +289,7 @@
         em = _buildkindpatsmatcher(
             includematcher,
             root,
+            cwd,
             kindpats,
             ctx=ctx,
             listsubrepos=listsubrepos,
@@ -345,7 +353,10 @@
                 ):
                     kindpats.append((k, p, source or pat))
             except error.Abort as inst:
-                raise error.Abort(b'%s: %s' % (pat, inst[0]))
+                raise error.Abort(
+                    b'%s: %s'
+                    % (pat, inst[0])  # pytype: disable=unsupported-operands
+                )
             except IOError as inst:
                 if warn:
                     warn(
@@ -372,10 +383,6 @@
         '''Callback from dirstate.walk for each explicit file that can't be
         found/accessed, with an error message.'''
 
-    # If an explicitdir is set, it will be called when an explicitly listed
-    # directory is visited.
-    explicitdir = None
-
     # If an traversedir is set, it will be called when a directory discovered
     # by recursive traversal is visited.
     traversedir = None
@@ -543,16 +550,6 @@
         return b'<predicatenmatcher pred=%s>' % s
 
 
-def normalizerootdir(dir, funcname):
-    if dir == b'.':
-        util.nouideprecwarn(
-            b"match.%s() no longer accepts '.', use '' instead." % funcname,
-            b'5.1',
-        )
-        return b''
-    return dir
-
-
 class patternmatcher(basematcher):
     r"""Matches a set of (kind, pat, source) against a 'root' directory.
 
@@ -595,17 +592,17 @@
 
     @propertycache
     def _dirs(self):
-        return set(util.dirs(self._fileset))
+        return set(pathutil.dirs(self._fileset))
 
     def visitdir(self, dir):
-        dir = normalizerootdir(dir, b'visitdir')
         if self._prefix and dir in self._fileset:
             return b'all'
         return (
             dir in self._fileset
             or dir in self._dirs
             or any(
-                parentdir in self._fileset for parentdir in util.finddirs(dir)
+                parentdir in self._fileset
+                for parentdir in pathutil.finddirs(dir)
             )
         )
 
@@ -626,9 +623,9 @@
         return b'<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats)
 
 
-# This is basically a reimplementation of util.dirs that stores the children
-# instead of just a count of them, plus a small optional optimization to avoid
-# some directories we don't need.
+# This is basically a reimplementation of pathutil.dirs that stores the
+# children instead of just a count of them, plus a small optional optimization
+# to avoid some directories we don't need.
 class _dirchildren(object):
     def __init__(self, paths, onlyinclude=None):
         self._dirs = {}
@@ -650,7 +647,7 @@
     @staticmethod
     def _findsplitdirs(path):
         # yields (dirname, basename) tuples, walking back to the root.  This is
-        # very similar to util.finddirs, except:
+        # very similar to pathutil.finddirs, except:
         #  - produces a (dirname, basename) tuple, not just 'dirname'
         # Unlike manifest._splittopdir, this does not suffix `dirname` with a
         # slash.
@@ -682,14 +679,15 @@
         self._parents = parents
 
     def visitdir(self, dir):
-        dir = normalizerootdir(dir, b'visitdir')
         if self._prefix and dir in self._roots:
             return b'all'
         return (
             dir in self._roots
             or dir in self._dirs
             or dir in self._parents
-            or any(parentdir in self._roots for parentdir in util.finddirs(dir))
+            or any(
+                parentdir in self._roots for parentdir in pathutil.finddirs(dir)
+            )
         )
 
     @propertycache
@@ -714,7 +712,9 @@
             b'' in self._roots
             or dir in self._roots
             or dir in self._dirs
-            or any(parentdir in self._roots for parentdir in util.finddirs(dir))
+            or any(
+                parentdir in self._roots for parentdir in pathutil.finddirs(dir)
+            )
         ):
             return b'this'
 
@@ -760,15 +760,12 @@
 
     @propertycache
     def _dirs(self):
-        return set(util.dirs(self._fileset))
+        return set(pathutil.dirs(self._fileset))
 
     def visitdir(self, dir):
-        dir = normalizerootdir(dir, b'visitdir')
         return dir in self._dirs
 
     def visitchildrenset(self, dir):
-        dir = normalizerootdir(dir, b'visitchildrenset')
-
         if not self._fileset or dir not in self._dirs:
             return set()
 
@@ -799,8 +796,7 @@
     '''Composes two matchers by matching if the first matches and the second
     does not.
 
-    The second matcher's non-matching-attributes (bad, explicitdir,
-    traversedir) are ignored.
+    The second matcher's non-matching-attributes (bad, traversedir) are ignored.
     '''
 
     def __init__(self, m1, m2):
@@ -808,7 +804,6 @@
         self._m1 = m1
         self._m2 = m2
         self.bad = m1.bad
-        self.explicitdir = m1.explicitdir
         self.traversedir = m1.traversedir
 
     def matchfn(self, f):
@@ -869,8 +864,7 @@
 def intersectmatchers(m1, m2):
     '''Composes two matchers by matching if both of them match.
 
-    The second matcher's non-matching-attributes (bad, explicitdir,
-    traversedir) are ignored.
+    The second matcher's non-matching-attributes (bad, traversedir) are ignored.
     '''
     if m1 is None or m2 is None:
         return m1 or m2
@@ -879,7 +873,6 @@
         # TODO: Consider encapsulating these things in a class so there's only
         # one thing to copy from m1.
         m.bad = m1.bad
-        m.explicitdir = m1.explicitdir
         m.traversedir = m1.traversedir
         return m
     if m2.always():
@@ -894,7 +887,6 @@
         self._m1 = m1
         self._m2 = m2
         self.bad = m1.bad
-        self.explicitdir = m1.explicitdir
         self.traversedir = m1.traversedir
 
     @propertycache
@@ -956,7 +948,7 @@
     The paths are remapped to remove/insert the path as needed:
 
     >>> from . import pycompat
-    >>> m1 = match(b'root', b'', [b'a.txt', b'sub/b.txt'])
+    >>> m1 = match(util.localpath(b'/root'), b'', [b'a.txt', b'sub/b.txt'], auditor=lambda name: None)
     >>> m2 = subdirmatcher(b'sub', m1)
     >>> m2(b'a.txt')
     False
@@ -1005,7 +997,6 @@
         return self._matcher.matchfn(self._path + b"/" + f)
 
     def visitdir(self, dir):
-        dir = normalizerootdir(dir, b'visitdir')
         if dir == b'':
             dir = self._path
         else:
@@ -1013,7 +1004,6 @@
         return self._matcher.visitdir(dir)
 
     def visitchildrenset(self, dir):
-        dir = normalizerootdir(dir, b'visitchildrenset')
         if dir == b'':
             dir = self._path
         else:
@@ -1037,13 +1027,12 @@
 class prefixdirmatcher(basematcher):
     """Adapt a matcher to work on a parent directory.
 
-    The matcher's non-matching-attributes (bad, explicitdir, traversedir) are
-    ignored.
+    The matcher's non-matching-attributes (bad, traversedir) are ignored.
 
     The prefix path should usually be the relative path from the root of
     this matcher to the root of the wrapped matcher.
 
-    >>> m1 = match(util.localpath(b'root/d/e'), b'f', [b'../a.txt', b'b.txt'])
+    >>> m1 = match(util.localpath(b'/root/d/e'), b'f', [b'../a.txt', b'b.txt'], auditor=lambda name: None)
     >>> m2 = prefixdirmatcher(b'd/e', m1)
     >>> m2(b'a.txt')
     False
@@ -1086,7 +1075,7 @@
 
     @propertycache
     def _pathdirs(self):
-        return set(util.finddirs(self._path))
+        return set(pathutil.finddirs(self._path))
 
     def visitdir(self, dir):
         if dir == self._path:
@@ -1121,14 +1110,13 @@
 class unionmatcher(basematcher):
     """A matcher that is the union of several matchers.
 
-    The non-matching-attributes (bad, explicitdir, traversedir) are taken from
-    the first matcher.
+    The non-matching-attributes (bad, traversedir) are taken from the first
+    matcher.
     """
 
     def __init__(self, matchers):
         m1 = matchers[0]
         super(unionmatcher, self).__init__()
-        self.explicitdir = m1.explicitdir
         self.traversedir = m1.traversedir
         self._matchers = matchers
 
@@ -1507,8 +1495,8 @@
     p = set()
     # Add the parents as non-recursive/exact directories, since they must be
     # scanned to get to either the roots or the other exact directories.
-    p.update(util.dirs(d))
-    p.update(util.dirs(r))
+    p.update(pathutil.dirs(d))
+    p.update(pathutil.dirs(r))
 
     # FIXME: all uses of this function convert these to sets, do so before
     # returning.
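
(Illustrative sketch of the helpers that moved from util to pathutil;
the move does not change their semantics.)

    from mercurial import pathutil

    # finddirs() yields each parent directory, ending with b''.
    list(pathutil.finddirs(b'a/b/c'))       # [b'a/b', b'a', b'']
    # dirs() builds a multiset of parent directories supporting `in'.
    b'a/b' in pathutil.dirs([b'a/b/c'])     # True
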
--- a/mercurial/mdiff.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/mdiff.py	Tue Jan 21 13:14:51 2020 -0500
@@ -27,8 +27,8 @@
 
 _missing_newline_marker = b"\\ No newline at end of file\n"
 
-bdiff = policy.importmod(r'bdiff')
-mpatch = policy.importmod(r'mpatch')
+bdiff = policy.importmod('bdiff')
+mpatch = policy.importmod('mpatch')
 
 blocks = bdiff.blocks
 fixws = bdiff.fixws
@@ -38,6 +38,7 @@
 splitnewlines = bdiff.splitnewlines
 
 
+# TODO: this looks like it could be an attrs, which might help pytype
 class diffopts(object):
     '''context is the number of context lines
     text treats all files as text
@@ -52,6 +53,8 @@
     upgrade generates git diffs to avoid data loss
     '''
 
+    _HAS_DYNAMIC_ATTRIBUTES = True
+
     defaults = {
         b'context': 3,
         b'text': False,
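
(Sketch of the pytype marker used above: a class-level
_HAS_DYNAMIC_ATTRIBUTES tells pytype not to report attribute errors for
instances whose attributes are set dynamically, as diffopts does from
its **opts.)

    class Bag(object):
        _HAS_DYNAMIC_ATTRIBUTES = True

        def __init__(self, **opts):
            for k, v in opts.items():
                setattr(self, k, v)
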
--- a/mercurial/merge.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/merge.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,7 +8,6 @@
 from __future__ import absolute_import
 
 import errno
-import hashlib
 import shutil
 import stat
 import struct
@@ -32,12 +31,14 @@
     filemerge,
     match as matchmod,
     obsutil,
+    pathutil,
     pycompat,
     scmutil,
     subrepoutil,
     util,
     worker,
 )
+from .utils import hashutil
 
 _pack = struct.pack
 _unpack = struct.unpack
@@ -511,7 +512,7 @@
         """hash the path of a local file context for storage in the .hg/merge
         directory."""
 
-        return hex(hashlib.sha1(path).digest())
+        return hex(hashutil.sha1(path).digest())
 
     def add(self, fcl, fco, fca, fd):
         """add a new (potentially?) conflicting file the merge state
@@ -813,7 +814,7 @@
             return False
 
         # Check for path prefixes that exist as unknown files.
-        for p in reversed(list(util.finddirs(f))):
+        for p in reversed(list(pathutil.finddirs(f))):
             if p in self._missingdircache:
                 return
             if p in self._unknowndircache:
@@ -947,7 +948,7 @@
             backup = (
                 f in fileconflicts
                 or f in pathconflicts
-                or any(p in pathconflicts for p in util.finddirs(f))
+                or any(p in pathconflicts for p in pathutil.finddirs(f))
             )
             (flags,) = args
             actions[f] = (ACTION_GET, (flags, backup), msg)
@@ -1077,7 +1078,7 @@
     in.
     """
     for f in manifest:
-        for p in util.finddirs(f):
+        for p in pathutil.finddirs(f):
             if p in dirs:
                 yield f, p
                 break
@@ -1116,7 +1117,7 @@
             ACTION_CREATED_MERGE,
         ):
             # This action may create a new local file.
-            createdfiledirs.update(util.finddirs(f))
+            createdfiledirs.update(pathutil.finddirs(f))
             if mf.hasdir(f):
                 # The file aliases a local directory.  This might be ok if all
                 # the files in the local directory are being deleted.  This
@@ -1710,7 +1711,7 @@
                 # with a directory this file is in, and if so, back that up.
                 conflicting = f
                 if not repo.wvfs.lexists(f):
-                    for p in util.finddirs(f):
+                    for p in pathutil.finddirs(f):
                         if repo.wvfs.isfileorlink(p):
                             conflicting = p
                             break
@@ -2092,7 +2093,7 @@
 
 
 def recordupdates(repo, actions, branchmerge, getfiledata):
-    b"record merge actions to the dirstate"
+    """record merge actions to the dirstate"""
     # remove (must come first)
     for f, args, msg in actions.get(ACTION_REMOVE, []):
         if branchmerge:
@@ -2581,7 +2582,7 @@
 
 
 def graft(
-    repo, ctx, pctx, labels=None, keepparent=False, keepconflictparent=False
+    repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
 ):
     """Do a graft-like merge.
 
@@ -2592,7 +2593,7 @@
     renames/copies appropriately.
 
     ctx - changeset to rebase
-    pctx - merge base, usually ctx.p1()
+    base - merge base, usually ctx.p1()
     labels - merge labels eg ['local', 'graft']
     keepparent - keep second parent if any
     keepconflictparent - if unresolved, keep parent used for the merge
@@ -2604,14 +2605,16 @@
     # to copy commits), and 2) informs update that the incoming changes are
     # newer than the destination so it doesn't prompt about "remote changed foo
     # which local deleted".
-    mergeancestor = repo.changelog.isancestor(repo[b'.'].node(), ctx.node())
+    wctx = repo[None]
+    pctx = wctx.p1()
+    mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
 
     stats = update(
         repo,
         ctx.node(),
         True,
         True,
-        pctx.node(),
+        base.node(),
         mergeancestor=mergeancestor,
         labels=labels,
     )
@@ -2621,15 +2624,18 @@
     else:
         pother = nullid
         parents = ctx.parents()
-        if keepparent and len(parents) == 2 and pctx in parents:
-            parents.remove(pctx)
+        if keepparent and len(parents) == 2 and base in parents:
+            parents.remove(base)
             pother = parents[0].node()
+    # Never set both parents equal to each other
+    if pother == pctx.node():
+        pother = nullid
 
     with repo.dirstate.parentchange():
-        repo.setparents(repo[b'.'].node(), pother)
+        repo.setparents(pctx.node(), pother)
         repo.dirstate.write(repo.currenttransaction())
         # fix up dirstate for copies and renames
-        copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
+        copies.graftcopies(wctx, ctx, base)
     return stats
 
 
@@ -2675,7 +2681,6 @@
 
     # There's no API to copy a matcher. So mutate the passed matcher and
     # restore it when we're done.
-    oldexplicitdir = matcher.explicitdir
     oldtraversedir = matcher.traversedir
 
     res = []
@@ -2683,7 +2688,7 @@
     try:
         if removeemptydirs:
             directories = []
-            matcher.explicitdir = matcher.traversedir = directories.append
+            matcher.traversedir = directories.append
 
         status = repo.status(match=matcher, ignored=ignored, unknown=True)
 
@@ -2705,5 +2710,4 @@
         return res
 
     finally:
-        matcher.explicitdir = oldexplicitdir
         matcher.traversedir = oldtraversedir
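
(Illustrative call for the adjusted graft() signature above: the merge
base is now passed explicitly as `base' and the working-copy parent is
derived inside graft() itself.)

    stats = merge.graft(repo, ctx, ctx.p1(), labels=[b'local', b'graft'])
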
--- a/mercurial/obsolete.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/obsolete.py	Tue Jan 21 13:14:51 2020 -0500
@@ -70,7 +70,6 @@
 from __future__ import absolute_import
 
 import errno
-import hashlib
 import struct
 
 from .i18n import _
@@ -85,9 +84,12 @@
     pycompat,
     util,
 )
-from .utils import dateutil
+from .utils import (
+    dateutil,
+    hashutil,
+)
 
-parsers = policy.importmod(r'parsers')
+parsers = policy.importmod('parsers')
 
 _pack = struct.pack
 _unpack = struct.unpack
@@ -580,7 +582,7 @@
         return len(self._all)
 
     def __nonzero__(self):
-        if not self._cached(r'_all'):
+        if not self._cached('_all'):
             try:
                 return self.svfs.stat(b'obsstore').st_size > 1
             except OSError as inst:
@@ -641,7 +643,7 @@
                 raise ValueError(succ)
         if prec in succs:
             raise ValueError(
-                r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
+                'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
             )
 
         metadata = tuple(sorted(pycompat.iteritems(metadata)))
@@ -752,11 +754,11 @@
         markers = list(markers)  # to allow repeated iteration
         self._data = self._data + rawdata
         self._all.extend(markers)
-        if self._cached(r'successors'):
+        if self._cached('successors'):
             _addsuccessors(self.successors, markers)
-        if self._cached(r'predecessors'):
+        if self._cached('predecessors'):
             _addpredecessors(self.predecessors, markers)
-        if self._cached(r'children'):
+        if self._cached('children'):
             _addchildren(self.children, markers)
         _checkinvalidmarkers(markers)
 
@@ -802,7 +804,7 @@
     # rely on obsstore class default when possible.
     kwargs = {}
     if defaultformat is not None:
-        kwargs[r'defaultformat'] = defaultformat
+        kwargs['defaultformat'] = defaultformat
     readonly = not isenabled(repo, createmarkersopt)
     store = obsstore(repo.svfs, readonly=readonly, **kwargs)
     if store and readonly:
@@ -980,7 +982,7 @@
     phase = repo._phasecache.phase  # would be faster to grab the full list
     public = phases.public
     cl = repo.changelog
-    torev = cl.nodemap.get
+    torev = cl.index.get_rev
     tonode = cl.node
     obsstore = repo.obsstore
     for rev in repo.revs(b'(not public()) and (not obsolete())'):
@@ -1028,7 +1030,7 @@
 
 def makefoldid(relation, user):
 
-    folddigest = hashlib.sha1(user)
+    folddigest = hashutil.sha1(user)
     for p in relation[0] + relation[1]:
         folddigest.update(b'%d' % p.rev())
         folddigest.update(p.node())
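
The hunks above (and the matching ones in patch.py, repair.py, and revlogutils/sidedata.py below) replace direct hashlib.sha1 calls with hashutil.sha1. A minimal sketch of the indirection such a wrapper module can provide, assuming a vendored collision-detecting SHA-1 package named sha1dc; the shipped module may differ in detail:

    # hashutil-style indirection: prefer a collision-detecting SHA-1
    # implementation when available, else fall back to stdlib hashlib.
    try:
        from ..thirdparty import sha1dc  # assumed vendored package
        sha1 = sha1dc.sha1
    except (ImportError, AttributeError):
        import hashlib
        sha1 = hashlib.sha1
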
--- a/mercurial/obsutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/obsutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -254,7 +254,7 @@
     unfi = repo.unfiltered()
 
     # shortcut to various useful item
-    nm = unfi.changelog.nodemap
+    has_node = unfi.changelog.index.has_node
     precursorsmarkers = unfi.obsstore.predecessors
     successormarkers = unfi.obsstore.successors
     childrenmarkers = unfi.obsstore.children
@@ -302,7 +302,7 @@
                 continue
 
             # is this a locally known node ?
-            known = prec in nm
+            known = has_node(prec)
             # if locally-known and not in the <nodes> set the traversal
             # stop here.
             if known and prec not in nodes:
@@ -333,7 +333,7 @@
     if repo.obsstore:
         # We only need this complicated logic if there is obsolescence
         # XXX will probably deserve an optimised revset.
-        nm = repo.changelog.nodemap
+        has_node = repo.changelog.index.has_node
         plen = -1
         # compute the whole set of successors or descendants
         while len(foreground) != plen:
@@ -341,7 +341,7 @@
             succs = set(c.node() for c in foreground)
             mutable = [c.node() for c in foreground if c.mutable()]
             succs.update(allsuccessors(repo.obsstore, mutable))
-            known = (n for n in succs if n in nm)
+            known = (n for n in succs if has_node(n))
             foreground = set(repo.set(b'%ln::', known))
     return set(c.node() for c in foreground)
 
@@ -483,7 +483,7 @@
 
 def getobsoleted(repo, tr):
     """return the set of pre-existing revisions obsoleted by a transaction"""
-    torev = repo.unfiltered().changelog.nodemap.get
+    torev = repo.unfiltered().changelog.index.get_rev
     phase = repo._phasecache.phase
     succsmarkers = repo.obsstore.successors.get
     public = phases.public
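
The nodemap lookups removed above belong to a tree-wide migration toward the index API. The three lookup flavors it offers, as defined by the pure-Python index later in this patch:

    idx = repo.changelog.index
    idx.has_node(node)   # membership test, returns a bool
    idx.rev(node)        # revision number; raises RevlogError if unknown
    idx.get_rev(node)    # revision number, or None if unknown
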
--- a/mercurial/parser.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/parser.py	Tue Jan 21 13:14:51 2020 -0500
@@ -34,17 +34,17 @@
         self.current = None
 
     def _advance(self):
-        b'advance the tokenizer'
+        """advance the tokenizer"""
         t = self.current
         self.current = next(self._iter, None)
         return t
 
     def _hasnewterm(self):
-        b'True if next token may start new term'
+        """True if next token may start new term"""
         return any(self._elements[self.current[0]][1:3])
 
     def _match(self, m):
-        b'make sure the tokenizer matches an end condition'
+        """make sure the tokenizer matches an end condition"""
         if self.current[0] != m:
             raise error.ParseError(
                 _(b"unexpected token: %s") % self.current[0], self.current[2]
@@ -52,7 +52,8 @@
         self._advance()
 
     def _parseoperand(self, bind, m=None):
-        b'gather right-hand-side operand until an end condition or binding met'
+        """gather right-hand-side operand until an end condition or binding
+        met"""
         if m and self.current[0] == m:
             expr = None
         else:
@@ -85,7 +86,7 @@
         return expr
 
     def parse(self, tokeniter):
-        b'generate a parse tree from tokens'
+        """generate a parse tree from tokens"""
         self._iter = tokeniter
         self._advance()
         res = self._parse()
@@ -93,13 +94,13 @@
         return res, pos
 
     def eval(self, tree):
-        b'recursively evaluate a parse tree using node methods'
+        """recursively evaluate a parse tree using node methods"""
         if not isinstance(tree, tuple):
             return tree
         return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
 
     def __call__(self, tokeniter):
-        b'parse tokens into a parse tree and evaluate if methods given'
+        """parse tokens into a parse tree and evaluate if methods given"""
         t = self.parse(tokeniter)
         if self._methods:
             return self.eval(t)
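
The b'...'-to-"""...""" docstring conversions above (and in patch.py below) matter on Python 3, where a leading bytes literal is not recognized as a docstring at all. A quick demonstration:

    def f():
        b'bytes literals are discarded, not stored as documentation'

    def g():
        """str docstrings work on both major Python versions"""

    assert f.__doc__ is None        # on Python 3; on Python 2, bytes is str
    assert g.__doc__ is not None
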
--- a/mercurial/patch.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/patch.py	Tue Jan 21 13:14:51 2020 -0500
@@ -12,7 +12,6 @@
 import contextlib
 import copy
 import errno
-import hashlib
 import os
 import re
 import shutil
@@ -41,6 +40,7 @@
 )
 from .utils import (
     dateutil,
+    hashutil,
     procutil,
     stringutil,
 )
@@ -217,7 +217,7 @@
     fileobj did not contain a patch. Caller must unlink filename when done.'''
 
     fd, tmpname = pycompat.mkstemp(prefix=b'hg-patch-')
-    tmpfp = os.fdopen(fd, r'wb')
+    tmpfp = os.fdopen(fd, 'wb')
     try:
         yield _extract(ui, fileobj, tmpname, tmpfp)
     finally:
@@ -241,8 +241,8 @@
 
     msg = mail.parse(fileobj)
 
-    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
-    data[b'user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
+    subject = msg['Subject'] and mail.headdecode(msg['Subject'])
+    data[b'user'] = msg['From'] and mail.headdecode(msg['From'])
     if not subject and not data[b'user']:
         # Not an email, restore parsed headers if any
         subject = (
@@ -255,7 +255,7 @@
     # should try to parse msg['Date']
     parents = []
 
-    nodeid = msg[r'X-Mercurial-Node']
+    nodeid = msg['X-Mercurial-Node']
     if nodeid:
         data[b'nodeid'] = nodeid = mail.headdecode(nodeid)
         ui.debug(b'Node ID: %s\n' % nodeid)
@@ -383,7 +383,7 @@
         return self._ispatchinga(afile) and self._ispatchingb(bfile)
 
     def __repr__(self):
-        return r"<patchmeta %s %r>" % (self.op, self.path)
+        return "<patchmeta %s %r>" % (self.op, self.path)
 
 
 def readgitpatch(lr):
@@ -963,7 +963,9 @@
         return self.files()[-1]
 
     def __repr__(self):
-        return b'<header %s>' % (b' '.join(map(repr, self.files())))
+        return '<header %s>' % (
+            ' '.join(pycompat.rapply(pycompat.fsdecode, self.files()))
+        )
 
     def isnewfile(self):
         return any(self.newfile_re.match(h) for h in self.header)
@@ -1225,7 +1227,7 @@
                 ncpatchfp = None
                 try:
                     # Write the initial patch
-                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
+                    f = util.nativeeolwriter(os.fdopen(patchfd, 'wb'))
                     chunk.header.write(f)
                     chunk.write(f)
                     f.write(
@@ -1245,7 +1247,7 @@
                         ui.warn(_(b"editor exited with exit code %d\n") % ret)
                         continue
                     # Remove comment lines
-                    patchfp = open(patchfn, r'rb')
+                    patchfp = open(patchfn, 'rb')
                     ncpatchfp = stringio()
                     for line in util.iterfile(patchfp):
                         line = util.fromnativeeol(line)
@@ -1570,7 +1572,7 @@
 
 
 class binhunk(object):
-    b'A binary patch file.'
+    """A binary patch file."""
 
     def __init__(self, lr, fname):
         self.text = None
@@ -2605,7 +2607,14 @@
 
     if not changes:
         changes = ctx1.status(ctx2, match=match)
-    modified, added, removed = changes[:3]
+    if isinstance(changes, list):
+        modified, added, removed = changes[:3]
+    else:
+        modified, added, removed = (
+            changes.modified,
+            changes.added,
+            changes.removed,
+        )
 
     if not modified and not added and not removed:
         return []
@@ -2788,7 +2797,7 @@
 
 def difflabel(func, *args, **kw):
     '''yields 2-tuples of (output, label) based on the output of func()'''
-    if kw.get(r'opts') and kw[r'opts'].worddiff:
+    if kw.get('opts') and kw['opts'].worddiff:
         dodiffhunk = diffsinglehunkinline
     else:
         dodiffhunk = diffsinglehunk
@@ -2934,7 +2943,7 @@
         if not text:
             text = b""
         l = len(text)
-        s = hashlib.sha1(b'blob %d\0' % l)
+        s = hashutil.sha1(b'blob %d\0' % l)
         s.update(text)
         return hex(s.digest())
 
--- a/mercurial/pathutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/pathutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,10 +9,14 @@
 from . import (
     encoding,
     error,
+    policy,
     pycompat,
     util,
 )
 
+rustdirs = policy.importrust('dirstate', 'Dirs')
+parsers = policy.importmod('parsers')
+
 
 def _lowerclean(s):
     return encoding.hfsignoreclean(s.lower())
@@ -271,6 +275,66 @@
         return path
 
 
+def finddirs(path):
+    pos = path.rfind(b'/')
+    while pos != -1:
+        yield path[:pos]
+        pos = path.rfind(b'/', 0, pos)
+    yield b''
+
+
+class dirs(object):
+    '''a multiset of directory names from a set of file paths'''
+
+    def __init__(self, map, skip=None):
+        self._dirs = {}
+        addpath = self.addpath
+        if isinstance(map, dict) and skip is not None:
+            for f, s in pycompat.iteritems(map):
+                if s[0] != skip:
+                    addpath(f)
+        elif skip is not None:
+            raise error.ProgrammingError(
+                b"skip character is only supported with a dict source"
+            )
+        else:
+            for f in map:
+                addpath(f)
+
+    def addpath(self, path):
+        dirs = self._dirs
+        for base in finddirs(path):
+            if base.endswith(b'/'):
+                raise ValueError(
+                    "found invalid consecutive slashes in path: %r" % base
+                )
+            if base in dirs:
+                dirs[base] += 1
+                return
+            dirs[base] = 1
+
+    def delpath(self, path):
+        dirs = self._dirs
+        for base in finddirs(path):
+            if dirs[base] > 1:
+                dirs[base] -= 1
+                return
+            del dirs[base]
+
+    def __iter__(self):
+        return iter(self._dirs)
+
+    def __contains__(self, d):
+        return d in self._dirs
+
+
+if util.safehasattr(parsers, 'dirs'):
+    dirs = parsers.dirs
+
+if rustdirs is not None:
+    dirs = rustdirs
+
+
 # forward two methods from posixpath that do what we need, but we'd
 # rather not let our internals know that we're thinking in posix terms
 # - instead we'll let them be oblivious.
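
As a worked example of the finddirs helper added above: it yields every ancestor directory of a slash-separated path, deepest first, ending with the empty string that stands for the repository root.

    >>> list(finddirs(b'a/b/c.txt'))
    [b'a/b', b'a', b'']
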
--- a/mercurial/phases.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/phases.py	Tue Jan 21 13:14:51 2020 -0500
@@ -112,6 +112,7 @@
     nullid,
     nullrev,
     short,
+    wdirrev,
 )
 from .pycompat import (
     getattr,
@@ -134,7 +135,7 @@
 public, draft, secret = range(3)
 internal = INTERNAL_FLAG | HIDEABLE_FLAG
 archived = HIDEABLE_FLAG
-allphases = range(internal + 1)
+allphases = list(range(internal + 1))
 trackedphases = allphases[1:]
 # record phase names
 cmdphasenames = [b'public', b'draft', b'secret']  # known to `hg phase` command
@@ -242,31 +243,51 @@
         """return a smartset for the given phases"""
         self.loadphaserevs(repo)  # ensure phase's sets are loaded
         phases = set(phases)
-        if public not in phases:
-            # fast path: _phasesets contains the interesting sets,
-            # might only need a union and post-filtering.
-            if len(phases) == 1:
-                [p] = phases
-                revs = self._phasesets[p]
-            else:
-                revs = set.union(*[self._phasesets[p] for p in phases])
+        publicphase = public in phases
+
+        if publicphase:
+            # In this case, phases keeps all the *other* phases.
+            phases = set(allphases).difference(phases)
+            if not phases:
+                return smartset.fullreposet(repo)
+
+        # fast path: _phasesets contains the interesting sets,
+        # might only need a union and post-filtering.
+        revsneedscopy = False
+        if len(phases) == 1:
+            [p] = phases
+            revs = self._phasesets[p]
+            revsneedscopy = True  # Don't modify _phasesets
+        else:
+            # revs has the revisions in all *other* phases.
+            revs = set.union(*[self._phasesets[p] for p in phases])
+
+        def _addwdir(wdirsubset, wdirrevs):
+            if wdirrev in wdirsubset and repo[None].phase() in phases:
+                if revsneedscopy:
+                    wdirrevs = wdirrevs.copy()
+                # The working dir would never be in the cache, but it was in
+                # the subset being filtered for its phase (or filtered out,
+                # depending on publicphase), so add it to the output to be
+                # included (or filtered out).
+                wdirrevs.add(wdirrev)
+            return wdirrevs
+
+        if not publicphase:
             if repo.changelog.filteredrevs:
                 revs = revs - repo.changelog.filteredrevs
+
             if subset is None:
                 return smartset.baseset(revs)
             else:
+                revs = _addwdir(subset, revs)
                 return subset & smartset.baseset(revs)
         else:
-            phases = set(allphases).difference(phases)
-            if not phases:
-                return smartset.fullreposet(repo)
-            if len(phases) == 1:
-                [p] = phases
-                revs = self._phasesets[p]
-            else:
-                revs = set.union(*[self._phasesets[p] for p in phases])
             if subset is None:
                 subset = smartset.fullreposet(repo)
+
+            revs = _addwdir(subset, revs)
+
             if not revs:
                 return subset
             return subset.filter(lambda r: r not in revs)
@@ -512,9 +533,9 @@
         Nothing is lost as unknown nodes only hold data for their descendants.
         """
         filtered = False
-        nodemap = repo.changelog.nodemap  # to filter unknown nodes
+        has_node = repo.changelog.index.has_node  # to filter unknown nodes
         for phase, nodes in enumerate(self.phaseroots):
-            missing = sorted(node for node in nodes if node not in nodemap)
+            missing = sorted(node for node in nodes if not has_node(node))
             if missing:
                 for mnode in missing:
                     repo.ui.debug(
@@ -672,7 +693,7 @@
     repo = repo.unfiltered()
     # build list from dictionary
     draftroots = []
-    nodemap = repo.changelog.nodemap  # to filter unknown nodes
+    has_node = repo.changelog.index.has_node  # to filter unknown nodes
     for nhex, phase in pycompat.iteritems(roots):
         if nhex == b'publishing':  # ignore data related to publish option
             continue
@@ -688,7 +709,7 @@
                     % nhex
                 )
         elif phase == draft:
-            if node in nodemap:
+            if has_node(node):
                 draftroots.append(node)
         else:
             repo.ui.warn(
@@ -733,7 +754,7 @@
 
     repo = repo.unfiltered()
     cl = repo.changelog
-    rev = cl.nodemap.get
+    rev = cl.index.get_rev
     if not roots:
         return heads
     if not heads or heads == [nullid]:
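
The restructured phase query above leans on a complement trick: _phasesets only tracks non-public revisions, so a query that includes public is answered by computing the union of all *other* phases and filtering those out. A toy illustration with made-up revision numbers:

    # toy data, not real repository contents
    allphases = [0, 1, 2]                  # public, draft, secret (simplified)
    _phasesets = {1: {5, 6}, 2: {7}}       # only non-public sets are tracked
    phases = {0}                           # caller asked for public()
    others = set(allphases).difference(phases)               # {1, 2}
    nonpublic = set.union(*[_phasesets[p] for p in others])  # {5, 6, 7}
    # "public" is then every revision of the subset not in nonpublic
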
--- a/mercurial/policy.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/policy.py	Tue Jan 21 13:14:51 2020 -0500
@@ -29,14 +29,14 @@
 policy = b'allow'
 _packageprefs = {
     # policy: (versioned package, pure package)
-    b'c': (r'cext', None),
-    b'allow': (r'cext', r'pure'),
-    b'cffi': (r'cffi', None),
-    b'cffi-allow': (r'cffi', r'pure'),
-    b'py': (None, r'pure'),
+    b'c': ('cext', None),
+    b'allow': ('cext', 'pure'),
+    b'cffi': ('cffi', None),
+    b'cffi-allow': ('cffi', 'pure'),
+    b'py': (None, 'pure'),
     # For now, rust policies impact importrust only
-    b'rust+c': (r'cext', None),
-    b'rust+c-allow': (r'cext', r'pure'),
+    b'rust+c': ('cext', None),
+    b'rust+c-allow': ('cext', 'pure'),
 }
 
 try:
@@ -50,15 +50,15 @@
 #
 # The canonical way to do this is to test platform.python_implementation().
 # But we don't import platform and don't bloat for it here.
-if r'__pypy__' in sys.builtin_module_names:
+if '__pypy__' in sys.builtin_module_names:
     policy = b'cffi'
 
 # Environment variable can always force settings.
 if sys.version_info[0] >= 3:
-    if r'HGMODULEPOLICY' in os.environ:
-        policy = os.environ[r'HGMODULEPOLICY'].encode(r'utf-8')
+    if 'HGMODULEPOLICY' in os.environ:
+        policy = os.environ['HGMODULEPOLICY'].encode('utf-8')
 else:
-    policy = os.environ.get(r'HGMODULEPOLICY', policy)
+    policy = os.environ.get('HGMODULEPOLICY', policy)
 
 
 def _importfrom(pkgname, modname):
@@ -68,7 +68,7 @@
     try:
         fakelocals[modname] = mod = getattr(pkg, modname)
     except AttributeError:
-        raise ImportError(r'cannot import name %s' % modname)
+        raise ImportError('cannot import name %s' % modname)
     # force import; fakelocals[modname] may be replaced with the real module
     getattr(mod, '__doc__', None)
     return fakelocals[modname]
@@ -76,19 +76,19 @@
 
 # keep in sync with "version" in C modules
 _cextversions = {
-    (r'cext', r'base85'): 1,
-    (r'cext', r'bdiff'): 3,
-    (r'cext', r'mpatch'): 1,
-    (r'cext', r'osutil'): 4,
-    (r'cext', r'parsers'): 13,
+    ('cext', 'base85'): 1,
+    ('cext', 'bdiff'): 3,
+    ('cext', 'mpatch'): 1,
+    ('cext', 'osutil'): 4,
+    ('cext', 'parsers'): 16,
 }
 
 # map import request to other package or module
 _modredirects = {
-    (r'cext', r'charencode'): (r'cext', r'parsers'),
-    (r'cffi', r'base85'): (r'pure', r'base85'),
-    (r'cffi', r'charencode'): (r'pure', r'charencode'),
-    (r'cffi', r'parsers'): (r'pure', r'parsers'),
+    ('cext', 'charencode'): ('cext', 'parsers'),
+    ('cffi', 'base85'): ('pure', 'base85'),
+    ('cffi', 'charencode'): ('pure', 'charencode'),
+    ('cffi', 'parsers'): ('pure', 'parsers'),
 }
 
 
@@ -97,8 +97,8 @@
     actual = getattr(mod, 'version', None)
     if actual != expected:
         raise ImportError(
-            r'cannot import module %s.%s '
-            r'(expected version: %d, actual: %r)'
+            'cannot import module %s.%s '
+            '(expected version: %d, actual: %r)'
             % (pkgname, modname, expected, actual)
         )
 
@@ -108,7 +108,7 @@
     try:
         verpkg, purepkg = _packageprefs[policy]
     except KeyError:
-        raise ImportError(r'invalid HGMODULEPOLICY %r' % policy)
+        raise ImportError('invalid HGMODULEPOLICY %r' % policy)
     assert verpkg or purepkg
     if verpkg:
         pn, mn = _modredirects.get((verpkg, modname), (verpkg, modname))
@@ -141,7 +141,7 @@
         return default
 
     try:
-        mod = _importfrom(r'rustext', modname)
+        mod = _importfrom('rustext', modname)
     except ImportError:
         if _isrustpermissive():
             return default
@@ -154,4 +154,4 @@
     except AttributeError:
         if _isrustpermissive():
             return default
-        raise ImportError(r"Cannot import name %s" % member)
+        raise ImportError("Cannot import name %s" % member)
--- a/mercurial/posix.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/posix.py	Tue Jan 21 13:14:51 2020 -0500
@@ -32,7 +32,7 @@
     pycompat,
 )
 
-osutil = policy.importmod(r'osutil')
+osutil = policy.importmod('osutil')
 
 normpath = os.path.normpath
 samestat = os.path.samestat
@@ -60,11 +60,11 @@
 
 if not pycompat.ispy3:
 
-    def posixfile(name, mode=r'r', buffering=-1):
+    def posixfile(name, mode='r', buffering=-1):
         fp = open(name, mode=mode, buffering=buffering)
         # The position when opening in append mode is implementation defined, so
         # make it consistent by always seeking to the end.
-        if r'a' in mode:
+        if 'a' in mode:
             fp.seek(0, os.SEEK_END)
         return fp
 
@@ -323,7 +323,10 @@
                     fullpath = os.path.join(cachedir, target)
                     open(fullpath, b'w').close()
                 except IOError as inst:
-                    if inst[0] == errno.EACCES:
+                    if (
+                        inst[0]  # pytype: disable=unsupported-operands
+                        == errno.EACCES
+                    ):
                         # If we can't write to cachedir, just pretend
                         # that the fs is readonly and by association
                         # that the fs won't support symlinks. This
@@ -463,7 +466,7 @@
             u = s.decode('utf-8')
 
         # Decompose then lowercase (HFS+ technote specifies lower)
-        enc = unicodedata.normalize(r'NFD', u).lower().encode('utf-8')
+        enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
         # drop HFS+ ignored characters
         return encoding.hfsignoreclean(enc)
 
--- a/mercurial/profiling.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/profiling.py	Tue Jan 21 13:14:51 2020 -0500
@@ -77,7 +77,7 @@
 @contextlib.contextmanager
 def flameprofile(ui, fp):
     try:
-        from flamegraph import flamegraph
+        from flamegraph import flamegraph  # pytype: disable=import-error
     except ImportError:
         raise error.Abort(
             _(
@@ -167,9 +167,9 @@
         elif profformat == b'hotpath':
             # inconsistent config: profiling.showmin
             limit = ui.configwith(fraction, b'profiling', b'showmin', 0.05)
-            kwargs[r'limit'] = limit
+            kwargs['limit'] = limit
             showtime = ui.configbool(b'profiling', b'showtime')
-            kwargs[r'showtime'] = showtime
+            kwargs['showtime'] = showtime
 
         statprof.display(fp, data=data, format=displayformat, **kwargs)
 
@@ -204,7 +204,7 @@
 
         If the profiler was already started, this has no effect."""
         if not self._entered:
-            raise error.ProgrammingError()
+            raise error.ProgrammingError(b'use a context manager to start')
         if self._started:
             return
         self._started = True
--- a/mercurial/pure/charencode.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/pure/charencode.py	Tue Jan 21 13:14:51 2020 -0500
@@ -85,6 +85,6 @@
         jm = _jsonmap
     # non-BMP char is represented as UTF-16 surrogate pair
     u16b = u8chars.decode('utf-8', _utf8strict).encode('utf-16', _utf8strict)
-    u16codes = array.array(r'H', u16b)
+    u16codes = array.array('H', u16b)
     u16codes.pop(0)  # drop BOM
     return b''.join(jm[x] if x < 128 else b'\\u%04x' % x for x in u16codes)
--- a/mercurial/pure/osutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/pure/osutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -218,7 +218,7 @@
     def _raiseioerror(name):
         err = ctypes.WinError()
         raise IOError(
-            err.errno, r'%s: %s' % (encoding.strfromlocal(name), err.strerror)
+            err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
         )
 
     class posixfile(object):
@@ -257,7 +257,7 @@
                 creation = _OPEN_ALWAYS
                 flags |= _O_APPEND
             else:
-                raise ValueError(r"invalid mode: %s" % pycompat.sysstr(mode))
+                raise ValueError("invalid mode: %s" % pycompat.sysstr(mode))
 
             fh = _kernel32.CreateFileA(
                 name,
@@ -280,8 +280,8 @@
             # unfortunately, f.name is '<fdopen>' at this point -- so we store
             # the name on this wrapper. We cannot just assign to f.name,
             # because that attribute is read-only.
-            object.__setattr__(self, r'name', name)
-            object.__setattr__(self, r'_file', f)
+            object.__setattr__(self, 'name', name)
+            object.__setattr__(self, '_file', f)
 
         def __iter__(self):
             return self._file
--- a/mercurial/pure/parsers.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/pure/parsers.py	Tue Jan 21 13:14:51 2020 -0500
@@ -10,8 +10,13 @@
 import struct
 import zlib
 
-from ..node import nullid
-from .. import pycompat
+from ..node import nullid, nullrev
+from .. import (
+    pycompat,
+    util,
+)
+
+from ..revlogutils import nodemap as nodemaputil
 
 stringio = pycompat.bytesio
 
@@ -43,10 +48,51 @@
 
 
 class BaseIndexObject(object):
+    @property
+    def nodemap(self):
+        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
+        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
+        return self._nodemap
+
+    @util.propertycache
+    def _nodemap(self):
+        nodemap = nodemaputil.NodeMap({nullid: nullrev})
+        for r in range(0, len(self)):
+            n = self[r][7]
+            nodemap[n] = r
+        return nodemap
+
+    def has_node(self, node):
+        """return True if the node exist in the index"""
+        return node in self._nodemap
+
+    def rev(self, node):
+        """return a revision for a node
+
+        If the node is unknown, raise a RevlogError"""
+        return self._nodemap[node]
+
+    def get_rev(self, node):
+        """return a revision for a node
+
+        If the node is unknown, return None"""
+        return self._nodemap.get(node)
+
+    def _stripnodes(self, start):
+        if '_nodemap' in vars(self):
+            for r in range(start, len(self)):
+                n = self[r][7]
+                del self._nodemap[n]
+
+    def clearcaches(self):
+        self.__dict__.pop('_nodemap', None)
+
     def __len__(self):
         return self._lgt + len(self._extra)
 
     def append(self, tup):
+        if '_nodemap' in vars(self):
+            self._nodemap[tup[7]] = len(self)
         self._extra.append(tup)
 
     def _check_index(self, i):
@@ -86,6 +132,7 @@
             raise ValueError(b"deleting slices only supports a:-1 with step 1")
         i = i.start
         self._check_index(i)
+        self._stripnodes(i)
         if i < self._lgt:
             self._data = self._data[: i * indexsize]
             self._lgt = i
@@ -123,6 +170,7 @@
             raise ValueError(b"deleting slices only supports a:-1 with step 1")
         i = i.start
         self._check_index(i)
+        self._stripnodes(i)
         if i < self._lgt:
             self._offsets = self._offsets[:i]
             self._lgt = i
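
The pure-Python index above builds its node-to-rev map lazily through util.propertycache and invalidates it by popping the attribute from the instance dict. The same pattern in a self-contained sketch, with functools.cached_property (Python 3.8+) standing in for util.propertycache:

    import functools

    class LazyIndex(list):
        # entries are tuples whose last item is the node hash, as above
        @functools.cached_property
        def _nodemap(self):
            return {entry[-1]: rev for rev, entry in enumerate(self)}

        def clearcaches(self):
            # cached values live in the instance dict, so popping the
            # key forces a rebuild on the next attribute access
            self.__dict__.pop('_nodemap', None)
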
--- a/mercurial/pvec.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/pvec.py	Tue Jan 21 13:14:51 2020 -0500
@@ -48,7 +48,7 @@
   different branches
 '''
 
-from __future__ import absolute_import
+from __future__ import absolute_import, division
 
 from .node import nullrev
 from . import (
@@ -57,12 +57,12 @@
 )
 
 _size = 448  # 70 chars b85-encoded
-_bytes = _size / 8
+_bytes = _size // 8
 _depthbits = 24
-_depthbytes = _depthbits / 8
+_depthbytes = _depthbits // 8
 _vecbytes = _bytes - _depthbytes
 _vecbits = _vecbytes * 8
-_radius = (_vecbits - 30) / 2  # high probability vectors are related
+_radius = (_vecbits - 30) // 2  # high probability vectors are related
 
 
 def _bin(bs):
@@ -74,9 +74,10 @@
 
 
 def _str(v, l):
+    # type: (int, int) -> bytes
     bs = b""
     for p in pycompat.xrange(l):
-        bs = chr(v & 255) + bs
+        bs = pycompat.bytechr(v & 255) + bs
         v >>= 8
     return bs
 
@@ -131,7 +132,7 @@
     if hdist > ddist:
         # if delta = 10 and hdist = 100, then we need to go up 55 steps
         # to the ancestor and down 45
-        changes = (hdist - ddist + 1) / 2
+        changes = (hdist - ddist + 1) // 2
     else:
         # must make at least one change
         changes = 1
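
The /-to-// changes above are the classic Python 3 porting fix: true division returns a float, which silently poisons the byte-length arithmetic pvec does.

    448 / 8    # -> 56.0 on Python 3, a float
    448 // 8   # -> 56 on both major versions, the int the module expects
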
--- a/mercurial/pycompat.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/pycompat.py	Tue Jan 21 13:14:51 2020 -0500
@@ -19,7 +19,13 @@
 import tempfile
 
 ispy3 = sys.version_info[0] >= 3
-ispypy = r'__pypy__' in sys.builtin_module_names
+ispypy = '__pypy__' in sys.builtin_module_names
+TYPE_CHECKING = False
+
+if not globals():  # hide this from non-pytype users
+    import typing
+
+    TYPE_CHECKING = typing.TYPE_CHECKING
 
 if not ispy3:
     import cookielib
@@ -94,6 +100,13 @@
     import io
     import struct
 
+    if os.name == r'nt' and sys.version_info >= (3, 6):
+        # MBCS (or ANSI) filesystem encoding must be used as before.
+        # Otherwise non-ASCII filenames in existing repositories would be
+        # corrupted.
+        # This must be set once prior to any fsencode/fsdecode calls.
+        sys._enablelegacywindowsfsencoding()  # pytype: disable=module-attr
+
     fsencode = os.fsencode
     fsdecode = os.fsdecode
     oscurdir = os.curdir.encode('ascii')
@@ -105,6 +118,7 @@
     osaltsep = os.altsep
     if osaltsep:
         osaltsep = osaltsep.encode('ascii')
+    osdevnull = os.devnull.encode('ascii')
 
     sysplatform = sys.platform.encode('ascii')
     sysexecutable = sys.executable
@@ -139,12 +153,12 @@
     #
     # https://hg.python.org/cpython/file/v3.5.1/Programs/python.c#l55
     #
-    # TODO: On Windows, the native argv is wchar_t, so we'll need a different
-    # workaround to simulate the Python 2 (i.e. ANSI Win32 API) behavior.
+    # On Windows, the native argv is unicode and is converted to MBCS bytes
+    # since we do enable the legacy filesystem encoding.
     if getattr(sys, 'argv', None) is not None:
         sysargv = list(map(os.fsencode, sys.argv))
 
-    bytechr = struct.Struct(r'>B').pack
+    bytechr = struct.Struct('>B').pack
     byterepr = b'%r'.__mod__
 
     class bytestr(bytes):
@@ -239,6 +253,8 @@
         This never raises UnicodeEncodeError, but only ASCII characters
         can be round-trip by sysstr(sysbytes(s)).
         """
+        if isinstance(s, bytes):
+            return s
         return s.encode('utf-8')
 
     def sysstr(s):
@@ -416,7 +432,7 @@
         if isinstance(filename, str):
             return filename
         else:
-            raise TypeError(r"expect str, not %s" % type(filename).__name__)
+            raise TypeError("expect str, not %s" % type(filename).__name__)
 
     # In Python 2, fsdecode() has a very chance to receive bytes. So it's
     # better not to touch Python 2 part as it's already working fine.
@@ -443,6 +459,7 @@
     ospardir = os.pardir
     ossep = os.sep
     osaltsep = os.altsep
+    osdevnull = os.devnull
     long = long
     stdin = sys.stdin
     stdout = sys.stdout
@@ -493,7 +510,7 @@
     mode=b'w+b', bufsize=-1, suffix=b'', prefix=b'tmp', dir=None, delete=True
 ):
     mode = sysstr(mode)
-    assert r'b' in mode
+    assert 'b' in mode
     return tempfile.NamedTemporaryFile(
         mode, bufsize, suffix=suffix, prefix=prefix, dir=dir, delete=delete
     )
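
The "if not globals():" guard above is deliberately never true at runtime (module globals are never empty), but static analyzers such as pytype still follow the branch, so TYPE_CHECKING becomes usable as a cheap typing gate. A hypothetical consumer, for illustration:

    from mercurial import pycompat

    if pycompat.TYPE_CHECKING:
        # resolved only by the static analyzer, never imported at runtime
        from typing import Dict, List
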
--- a/mercurial/rcutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/rcutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -15,6 +15,8 @@
     util,
 )
 
+from .utils import resourceutil
+
 if pycompat.iswindows:
     from . import scmwindows as scmplatform
 else:
@@ -59,13 +61,15 @@
     return result
 
 
-def defaultrcpath():
-    '''return rc paths in default.d'''
-    path = []
-    defaultpath = os.path.join(util.datapath, b'default.d')
-    if os.path.isdir(defaultpath):
-        path = _expandrcpath(defaultpath)
-    return path
+def default_rc_resources():
+    """return rc resource IDs in defaultrc"""
+    rsrcs = resourceutil.contents(b'mercurial.defaultrc')
+    return [
+        (b'mercurial.defaultrc', r)
+        for r in sorted(rsrcs)
+        if resourceutil.is_resource(b'mercurial.defaultrc', r)
+        and r.endswith(b'.rc')
+    ]
 
 
 def rccomponents():
@@ -76,9 +80,10 @@
 
     if a directory is provided, *.rc files under it will be used.
 
-    type could be either 'path' or 'items', if type is 'path', obj is a string,
-    and is the config file path. if type is 'items', obj is a list of (section,
-    name, value, source) that should fill the config directly.
+    type could be either 'path', 'items' or 'resource'. If type is 'path',
+    obj is a string, and is the config file path. If type is 'items', obj is a
+    list of (section, name, value, source) that should fill the config directly.
+    If type is 'resource', obj is a tuple of (package name, resource name).
     '''
     envrc = (b'items', envrcitems())
 
@@ -91,10 +96,12 @@
                 continue
             _rccomponents.extend((b'path', p) for p in _expandrcpath(p))
     else:
+        _rccomponents = [(b'resource', r) for r in default_rc_resources()]
+
         normpaths = lambda paths: [
             (b'path', os.path.normpath(p)) for p in paths
         ]
-        _rccomponents = normpaths(defaultrcpath() + systemrcpath())
+        _rccomponents.extend(normpaths(systemrcpath()))
         _rccomponents.append(envrc)
         _rccomponents.extend(normpaths(userrcpath()))
     return _rccomponents
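
default_rc_resources above enumerates package resources instead of walking a filesystem path, which keeps the default config discoverable even when Mercurial ships inside a zip or frozen binary. In terms of the stdlib API a resourceutil wrapper can delegate to (a sketch; the real wrapper also handles bytes names and older Pythons):

    import importlib.resources as resources

    def default_rc_resources():
        pkg = 'mercurial.defaultrc'
        return [
            (pkg, name)
            for name in sorted(resources.contents(pkg))
            if resources.is_resource(pkg, name) and name.endswith('.rc')
        ]
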
--- a/mercurial/repair.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/repair.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,7 +9,6 @@
 from __future__ import absolute_import
 
 import errno
-import hashlib
 
 from .i18n import _
 from .node import (
@@ -24,11 +23,15 @@
     exchange,
     obsolete,
     obsutil,
+    pathutil,
     phases,
     pycompat,
     util,
 )
-from .utils import stringutil
+from .utils import (
+    hashutil,
+    stringutil,
+)
 
 
 def backupbundle(
@@ -44,7 +47,7 @@
     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set(b'%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
-    totalhash = hashlib.sha1(b''.join(allhashes)).digest()
+    totalhash = hashutil.sha1(b''.join(allhashes)).digest()
     name = b"%s/%s-%s-%s.hg" % (
         backupdir,
         short(node),
@@ -476,7 +479,7 @@
         if b'treemanifest' in repo.requirements:
             # This logic is safe if treemanifest isn't enabled, but also
             # pointless, so we skip it if treemanifest isn't enabled.
-            for dir in util.dirs(seenfiles):
+            for dir in pathutil.dirs(seenfiles):
                 i = b'meta/%s/00manifest.i' % dir
                 d = b'meta/%s/00manifest.d' % dir
 
--- a/mercurial/repoview.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/repoview.py	Tue Jan 21 13:14:51 2020 -0500
@@ -11,13 +11,18 @@
 import copy
 import weakref
 
-from .node import nullrev
+from .i18n import _
+from .node import (
+    hex,
+    nullrev,
+)
 from .pycompat import (
     delattr,
     getattr,
     setattr,
 )
 from . import (
+    error,
     obsolete,
     phases,
     pycompat,
@@ -54,8 +59,9 @@
     tags = {}
     tagsmod.readlocaltags(repo.ui, repo, tags, {})
     if tags:
-        rev, nodemap = cl.rev, cl.nodemap
-        pinned.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
+        rev = cl.index.get_rev
+        pinned.update(rev(t[0]) for t in tags.values())
+        pinned.discard(None)
     return pinned
 
 
@@ -171,6 +177,9 @@
     b'base': computeimpactable,
 }
 
+# set of filter levels that always include the working copy parent.
+filter_has_wc = {b'visible', b'visible-hidden'}
+
 _basefiltername = list(filtertable)
 
 
@@ -211,6 +220,19 @@
     hidden-state and must be visible. They are dynamic and hence we should not
    cache its result"""
     if filtername not in repo.filteredrevcache:
+        if repo.ui.configbool(b'devel', b'debug.repo-filters'):
+            msg = b'computing revision filter for "%s"'
+            msg %= filtername
+            if repo.ui.tracebackflag and repo.ui.debugflag:
+                # XXX use ui.write_err
+                util.debugstacktrace(
+                    msg,
+                    f=repo.ui._fout,
+                    otherf=repo.ui._ferr,
+                    prefix=b'debug.filters: ',
+                )
+            else:
+                repo.ui.debug(b'debug.filters: %s\n' % msg)
         func = filtertable[filtername]
         if visibilityexceptions:
             return func(repo.unfiltered, visibilityexceptions)
@@ -218,6 +240,118 @@
     return repo.filteredrevcache[filtername]
 
 
+def wrapchangelog(unfichangelog, filteredrevs):
+    cl = copy.copy(unfichangelog)
+    cl.filteredrevs = filteredrevs
+
+    class filteredchangelog(filteredchangelogmixin, cl.__class__):
+        pass
+
+    cl.__class__ = filteredchangelog
+
+    return cl
+
+
+class filteredchangelogmixin(object):
+    def tiprev(self):
+        """filtered version of revlog.tiprev"""
+        for i in pycompat.xrange(len(self) - 1, -2, -1):
+            if i not in self.filteredrevs:
+                return i
+
+    def __contains__(self, rev):
+        """filtered version of revlog.__contains__"""
+        return 0 <= rev < len(self) and rev not in self.filteredrevs
+
+    def __iter__(self):
+        """filtered version of revlog.__iter__"""
+
+        def filterediter():
+            for i in pycompat.xrange(len(self)):
+                if i not in self.filteredrevs:
+                    yield i
+
+        return filterediter()
+
+    def revs(self, start=0, stop=None):
+        """filtered version of revlog.revs"""
+        for i in super(filteredchangelogmixin, self).revs(start, stop):
+            if i not in self.filteredrevs:
+                yield i
+
+    def _checknofilteredinrevs(self, revs):
+        """raise the appropriate error if 'revs' contains a filtered revision
+
+        This returns a version of 'revs' to be used thereafter by the caller.
+        In particular, if revs is an iterator, it is converted into a set.
+        """
+        safehasattr = util.safehasattr
+        if safehasattr(revs, '__next__'):
+            # Note that inspect.isgenerator() is not true for plain iterators.
+            revs = set(revs)
+
+        filteredrevs = self.filteredrevs
+        if safehasattr(revs, 'first'):  # smartset
+            offenders = revs & filteredrevs
+        else:
+            offenders = filteredrevs.intersection(revs)
+
+        for rev in offenders:
+            raise error.FilteredIndexError(rev)
+        return revs
+
+    def headrevs(self, revs=None):
+        if revs is None:
+            try:
+                return self.index.headrevsfiltered(self.filteredrevs)
+            # AttributeError covers non-c-extension environments and
+            # old c extensions without filter handling.
+            except AttributeError:
+                return self._headrevs()
+
+        revs = self._checknofilteredinrevs(revs)
+        return super(filteredchangelogmixin, self).headrevs(revs)
+
+    def strip(self, *args, **kwargs):
+        # XXX make something better than assert
+        # We can't expect proper strip behavior if we are filtered.
+        assert not self.filteredrevs
+        super(filteredchangelogmixin, self).strip(*args, **kwargs)
+
+    def rev(self, node):
+        """filtered version of revlog.rev"""
+        r = super(filteredchangelogmixin, self).rev(node)
+        if r in self.filteredrevs:
+            raise error.FilteredLookupError(
+                hex(node), self.indexfile, _(b'filtered node')
+            )
+        return r
+
+    def node(self, rev):
+        """filtered version of revlog.node"""
+        if rev in self.filteredrevs:
+            raise error.FilteredIndexError(rev)
+        return super(filteredchangelogmixin, self).node(rev)
+
+    def linkrev(self, rev):
+        """filtered version of revlog.linkrev"""
+        if rev in self.filteredrevs:
+            raise error.FilteredIndexError(rev)
+        return super(filteredchangelogmixin, self).linkrev(rev)
+
+    def parentrevs(self, rev):
+        """filtered version of revlog.parentrevs"""
+        if rev in self.filteredrevs:
+            raise error.FilteredIndexError(rev)
+        return super(filteredchangelogmixin, self).parentrevs(rev)
+
+    def flags(self, rev):
+        """filtered version of revlog.flags"""
+        if rev in self.filteredrevs:
+            raise error.FilteredIndexError(rev)
+        return super(filteredchangelogmixin, self).flags(rev)
+
+
 class repoview(object):
     """Provide a read/write view of a repo through a filtered changelog
 
@@ -254,12 +388,12 @@
     """
 
     def __init__(self, repo, filtername, visibilityexceptions=None):
-        object.__setattr__(self, r'_unfilteredrepo', repo)
-        object.__setattr__(self, r'filtername', filtername)
-        object.__setattr__(self, r'_clcachekey', None)
-        object.__setattr__(self, r'_clcache', None)
+        object.__setattr__(self, '_unfilteredrepo', repo)
+        object.__setattr__(self, 'filtername', filtername)
+        object.__setattr__(self, '_clcachekey', None)
+        object.__setattr__(self, '_clcache', None)
         # revs which are exceptions and must not be hidden
-        object.__setattr__(self, r'_visibilityexceptions', visibilityexceptions)
+        object.__setattr__(self, '_visibilityexceptions', visibilityexceptions)
 
     # not a propertycache on purpose we shall implement a proper cache later
     @property
@@ -286,10 +420,10 @@
             cl = None
         # could have been made None by the previous if
         if cl is None:
-            cl = copy.copy(unfichangelog)
-            cl.filteredrevs = revs
-            object.__setattr__(self, r'_clcache', cl)
-            object.__setattr__(self, r'_clcachekey', newkey)
+            # Only filter if there's something to filter
+            cl = wrapchangelog(unfichangelog, revs) if revs else unfichangelog
+            object.__setattr__(self, '_clcache', cl)
+            object.__setattr__(self, '_clcachekey', newkey)
         return cl
 
     def unfiltered(self):
@@ -303,7 +437,7 @@
         return self.unfiltered().filtered(name, visibilityexceptions)
 
     def __repr__(self):
-        return r'<%s:%s %r>' % (
+        return '<%s:%s %r>' % (
             self.__class__.__name__,
             pycompat.sysstr(self.filtername),
             self.unfiltered(),
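
wrapchangelog above grafts filteredchangelogmixin in front of the copied changelog's own class, so the filtered overrides apply without mutating the shared unfiltered instance. The pattern reduced to its core:

    import copy

    class base(object):
        def value(self):
            return 1

    class mixin(object):
        def value(self):
            return super(mixin, self).value() + 1

    def wrap(obj):
        obj = copy.copy(obj)        # never mutate the shared original

        class wrapped(mixin, obj.__class__):
            pass

        obj.__class__ = wrapped     # overrides now sit first in the MRO
        return obj

    assert wrap(base()).value() == 2
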
--- a/mercurial/revlog.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/revlog.py	Tue Jan 21 13:14:51 2020 -0500
@@ -75,6 +75,7 @@
 from .revlogutils import (
     deltas as deltautil,
     flagutil,
+    nodemap as nodemaputil,
     sidedata as sidedatautil,
 )
 from .utils import (
@@ -102,9 +103,10 @@
 REVIDX_FLAGS_ORDER
 REVIDX_RAWTEXT_CHANGING_FLAGS
 
-parsers = policy.importmod(r'parsers')
-rustancestor = policy.importrust(r'ancestor')
-rustdagop = policy.importrust(r'dagop')
+parsers = policy.importmod('parsers')
+rustancestor = policy.importrust('ancestor')
+rustdagop = policy.importrust('dagop')
+rustrevlog = policy.importrust('revlog')
 
 # Aliased for performance.
 _zlibdecompress = zlib.decompress
@@ -147,6 +149,16 @@
     return int(int(offset) << 16 | type)
 
 
+def _verify_revision(rl, skipflags, state, node):
+    """Verify the integrity of the given revlog ``node`` while providing a hook
+    point for extensions to influence the operation."""
+    if skipflags:
+        state[b'skipread'].add(node)
+    else:
+        # Side-effect: read content and verify hash.
+        rl.revision(node)
+
+
 @attr.s(slots=True, frozen=True)
 class _revisioninfo(object):
     """Information about a revision that allows building its fulltext
@@ -204,6 +216,50 @@
 
 
 class revlogoldindex(list):
+    @property
+    def nodemap(self):
+        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
+        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
+        return self._nodemap
+
+    @util.propertycache
+    def _nodemap(self):
+        nodemap = nodemaputil.NodeMap({nullid: nullrev})
+        for r in range(0, len(self)):
+            n = self[r][7]
+            nodemap[n] = r
+        return nodemap
+
+    def has_node(self, node):
+        """return True if the node exist in the index"""
+        return node in self._nodemap
+
+    def rev(self, node):
+        """return a revision for a node
+
+        If the node is unknown, raise a RevlogError"""
+        return self._nodemap[node]
+
+    def get_rev(self, node):
+        """return a revision for a node
+
+        If the node is unknown, return None"""
+        return self._nodemap.get(node)
+
+    def append(self, tup):
+        self._nodemap[tup[7]] = len(self)
+        super(revlogoldindex, self).append(tup)
+
+    def __delitem__(self, i):
+        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
+            raise ValueError(b"deleting slices only supports a:-1 with step 1")
+        for r in pycompat.xrange(i.start, len(self)):
+            del self._nodemap[self[r][7]]
+        super(revlogoldindex, self).__delitem__(i)
+
+    def clearcaches(self):
+        self.__dict__.pop('_nodemap', None)
+
     def __getitem__(self, i):
         if i == -1:
             return (0, 0, 0, -1, -1, -1, -1, nullid)
@@ -217,7 +273,7 @@
     def parseindex(self, data, inline):
         s = self.size
         index = []
-        nodemap = {nullid: nullrev}
+        nodemap = nodemaputil.NodeMap({nullid: nullrev})
         n = off = 0
         l = len(data)
         while off + s <= l:
@@ -239,7 +295,8 @@
             nodemap[e[6]] = n
             n += 1
 
-        return revlogoldindex(index), nodemap, None
+        index = revlogoldindex(index)
+        return index, None
 
     def packentry(self, entry, node, version, rev):
         if gettype(entry[0]):
@@ -286,7 +343,7 @@
     def parseindex(self, data, inline):
         # call the C implementation to parse the index data
         index, cache = parsers.parse_index2(data, inline)
-        return index, getattr(index, 'nodemap', None), cache
+        return index, cache
 
     def packentry(self, entry, node, version, rev):
         p = indexformatng_pack(*entry)
@@ -295,6 +352,12 @@
         return p
 
 
+class rustrevlogio(revlogio):
+    def parseindex(self, data, inline):
+        index, cache = super(rustrevlogio, self).parseindex(data, inline)
+        return rustrevlog.MixedIndex(index), cache
+
+
 class revlog(object):
     """
     the underlying revision storage object
@@ -371,12 +434,10 @@
         self._chunkcachesize = 65536
         self._maxchainlen = None
         self._deltabothparents = True
-        self.index = []
+        self.index = None
         # Mapping of partial identifiers to full nodes.
         self._pcache = {}
         # Mapping of revision integer to full node.
-        self._nodecache = {nullid: nullrev}
-        self._nodepos = None
         self._compengine = b'zlib'
         self._compengineopts = {}
         self._maxdeltachainspan = -1
@@ -533,15 +594,15 @@
         self._io = revlogio()
         if self.version == REVLOGV0:
             self._io = revlogoldio()
+        elif rustrevlog is not None and self.opener.options.get(b'rust.index'):
+            self._io = rustrevlogio()
         try:
             d = self._io.parseindex(indexdata, self._inline)
         except (ValueError, IndexError):
             raise error.RevlogError(
                 _(b"index %s is corrupted") % self.indexfile
             )
-        self.index, nodemap, self._chunkcache = d
-        if nodemap is not None:
-            self.nodemap = self._nodecache = nodemap
+        self.index, self._chunkcache = d
         if not self._chunkcache:
             self._chunkclear()
         # revnum -> (chain-length, sum-delta-length)
@@ -556,11 +617,11 @@
 
     def _indexfp(self, mode=b'r'):
         """file object for the revlog's index file"""
-        args = {r'mode': mode}
+        args = {'mode': mode}
         if mode != b'r':
-            args[r'checkambig'] = self._checkambig
+            args['checkambig'] = self._checkambig
         if mode == b'w':
-            args[r'atomictemp'] = True
+            args['atomictemp'] = True
         return self.opener(self.indexfile, **args)
 
     def _datafp(self, mode=b'r'):
@@ -593,8 +654,11 @@
             with func() as fp:
                 yield fp
 
+    def tiprev(self):
+        return len(self.index) - 1
+
     def tip(self):
-        return self.node(len(self.index) - 1)
+        return self.node(self.tiprev())
 
     def __contains__(self, rev):
         return 0 <= rev < len(self)
@@ -609,13 +673,20 @@
         """iterate over all rev in this revlog (from start to stop)"""
         return storageutil.iterrevs(len(self), start=start, stop=stop)
 
-    @util.propertycache
+    @property
     def nodemap(self):
-        if self.index:
-            # populate mapping down to the initial node
-            node0 = self.index[0][7]  # get around changelog filtering
-            self.rev(node0)
-        return self._nodecache
+        msg = (
+            b"revlog.nodemap is deprecated, "
+            b"use revlog.index.[has_node|rev|get_rev]"
+        )
+        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
+        return self.index.nodemap
+
+    @property
+    def _nodecache(self):
+        msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
+        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
+        return self.index.nodemap
 
     def hasnode(self, node):
         try:
@@ -642,19 +713,11 @@
         self._chainbasecache.clear()
         self._chunkcache = (0, b'')
         self._pcache = {}
-
-        try:
-            # If we are using the native C version, you are in a fun case
-            # where self.index, self.nodemap and self._nodecaches is the same
-            # object.
-            self._nodecache.clearcaches()
-        except AttributeError:
-            self._nodecache = {nullid: nullrev}
-            self._nodepos = None
+        self.index.clearcaches()
 
     def rev(self, node):
         try:
-            return self._nodecache[node]
+            return self.index.rev(node)
         except TypeError:
             raise
         except error.RevlogError:
@@ -662,24 +725,6 @@
             if node == wdirid or node in wdirfilenodeids:
                 raise error.WdirUnsupported
             raise error.LookupError(node, self.indexfile, _(b'no node'))
-        except KeyError:
-            # pure python cache lookup failed
-            n = self._nodecache
-            i = self.index
-            p = self._nodepos
-            if p is None:
-                p = len(i) - 1
-            else:
-                assert p < len(i)
-            for r in pycompat.xrange(p, -1, -1):
-                v = i[r][7]
-                n[v] = r
-                if v == node:
-                    self._nodepos = r - 1
-                    return r
-            if node == wdirid or node in wdirfilenodeids:
-                raise error.WdirUnsupported
-            raise error.LookupError(node, self.indexfile, _(b'no node'))
 
     # Accessors for index entries.
 
@@ -1253,7 +1298,7 @@
         return bool(self.reachableroots(a, [b], [a], includepath=False))
 
     def reachableroots(self, minroot, heads, roots, includepath=False):
-        """return (heads(::<roots> and <roots>::<heads>))
+        """return (heads(::(<roots> and <roots>::<heads>)))
 
         If includepath is True, return (<roots>::<heads>)."""
         try:
@@ -1736,10 +1781,8 @@
         if node == nullid:
             return b"", {}
 
-        # The text as stored inside the revlog. Might be the revision or might
-        # need to be processed to retrieve the revision.
-        rawtext = None
-
+        # ``rawtext`` is the text as stored inside the revlog. Might be the
+        # revision or might need to be processed to retrieve the revision.
         rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
 
         if raw and validated:
@@ -1986,7 +2029,7 @@
             )
 
         node = node or self.hash(rawtext, p1, p2)
-        if node in self.nodemap:
+        if self.index.has_node(node):
             return node
 
         if validatehash:
@@ -2195,12 +2238,6 @@
             node,
         )
         self.index.append(e)
-        self.nodemap[node] = curr
-
-        # Reset the pure node cache start lookup offset to account for new
-        # revision.
-        if self._nodepos is not None:
-            self._nodepos = curr
 
         entry = self._io.packentry(e, self.node, self.version, curr)
         self._writeentry(
@@ -2298,18 +2335,18 @@
 
                 nodes.append(node)
 
-                if node in self.nodemap:
+                if self.index.has_node(node):
                     self._nodeduplicatecallback(transaction, node)
                     # this can happen if two branches make the same change
                     continue
 
                 for p in (p1, p2):
-                    if p not in self.nodemap:
+                    if not self.index.has_node(p):
                         raise error.LookupError(
                             p, self.indexfile, _(b'unknown parent')
                         )
 
-                if deltabase not in self.nodemap:
+                if not self.index.has_node(deltabase):
                     raise error.LookupError(
                         deltabase, self.indexfile, _(b'unknown delta base')
                     )
@@ -2434,11 +2471,8 @@
         self._revisioncache = None
         self._chaininfocache = {}
         self._chunkclear()
-        for x in pycompat.xrange(rev, len(self)):
-            del self.nodemap[self.node(x)]
 
         del self.index[rev:-1]
-        self._nodepos = None
 
     def checksize(self):
         """Check size of index and data files
@@ -2840,6 +2874,7 @@
             )
 
         state[b'skipread'] = set()
+        state[b'safe_renamed'] = set()
 
         for rev in self:
             node = self.node(rev)
@@ -2897,11 +2932,7 @@
                 if skipflags:
                     skipflags &= self.flags(rev)
 
-                if skipflags:
-                    state[b'skipread'].add(node)
-                else:
-                    # Side-effect: read content and verify hash.
-                    self.revision(node)
+                _verify_revision(self, skipflags, state, node)
 
                 l1 = self.rawsize(rev)
                 l2 = len(self.rawdata(node))
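
The nodemap/_nodecache accessors above turn former attributes into deprecation shims: a property that warns and forwards to the new home. Outside Mercurial's util.nouideprecwarn, the same shape looks like this sketch:

    import warnings

    class demo(object):
        index = {}  # stand-in for the real index object

        @property
        def nodemap(self):
            warnings.warn(
                'demo.nodemap is deprecated, use demo.index instead',
                DeprecationWarning,
                stacklevel=2,   # point the warning at the caller
            )
            return self.index
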
--- a/mercurial/revlogutils/__init__.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/revlogutils/__init__.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,8 @@
+# mercurial.revlogutils -- basic utilities for revlog
+#
+# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/nodemap.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,15 @@
+# nodemap.py - nodemap related code and utilities
+#
+# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
+# Copyright 2019 George Racinet <georges.racinet@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+from .. import error
+
+
+class NodeMap(dict):
+    def __missing__(self, x):
+        raise error.RevlogError(b'unknown node: %s' % x)
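+
+
+# A minimal usage sketch (node keys are the 20-byte binary hashes used by
+# the revlog internals; the names below are illustrative only):
+#
+#     nm = NodeMap()
+#     nm[node] = rev        # record a node -> revision number mapping
+#     nm[unknown_node]      # raises error.RevlogError instead of KeyError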
--- a/mercurial/revlogutils/sidedata.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/revlogutils/sidedata.py	Tue Jan 21 13:14:51 2020 -0500
@@ -33,10 +33,10 @@
 
 from __future__ import absolute_import
 
-import hashlib
 import struct
 
 from .. import error
+from ..utils import hashutil
 
 ## sidedata type constant
 # reserve a block for testing purposes.
@@ -55,8 +55,8 @@
 SD_FILESREMOVED = 11
 
 # internal format constant
-SIDEDATA_HEADER = struct.Struct(r'>H')
-SIDEDATA_ENTRY = struct.Struct(r'>HL20s')
+SIDEDATA_HEADER = struct.Struct('>H')
+SIDEDATA_ENTRY = struct.Struct('>HL20s')
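+# on-disk layout: a 2-byte big-endian entry count, followed by one
+# (key, data length, sha1 digest) triple per entry, followed by the
+# concatenated data blocks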
 
 
 def sidedatawriteprocessor(rl, text, sidedata):
@@ -64,7 +64,7 @@
     sidedata.sort()
     rawtext = [SIDEDATA_HEADER.pack(len(sidedata))]
     for key, value in sidedata:
-        digest = hashlib.sha1(value).digest()
+        digest = hashutil.sha1(value).digest()
         rawtext.append(SIDEDATA_ENTRY.pack(key, len(value), digest))
     for key, value in sidedata:
         rawtext.append(value)
@@ -85,7 +85,7 @@
         # read the data associated with that entry
         nextdataoffset = dataoffset + size
         entrytext = text[dataoffset:nextdataoffset]
-        readdigest = hashlib.sha1(entrytext).digest()
+        readdigest = hashutil.sha1(entrytext).digest()
         if storeddigest != readdigest:
             raise error.SidedataHashError(key, storeddigest, readdigest)
         sidedata[key] = entrytext
--- a/mercurial/revset.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/revset.py	Tue Jan 21 13:14:51 2020 -0500
@@ -673,6 +673,7 @@
     1: added
     2: removed
     """
+    label = {0: 'modified', 1: 'added', 2: 'removed'}[field]
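+    # scmutil.status is no longer indexable like a tuple, so map the
+    # numeric field to the attribute name used by the getattr() call below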
     hasset = matchmod.patkind(pat) == b'set'
 
     mcache = [None]
@@ -683,25 +684,23 @@
             mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
         m = mcache[0]
         fname = None
+
+        assert m is not None  # help pytype
         if not m.anypats() and len(m.files()) == 1:
             fname = m.files()[0]
         if fname is not None:
             if fname not in c.files():
                 return False
         else:
-            for f in c.files():
-                if m(f):
-                    break
-            else:
+            if not any(m(f) for f in c.files()):
                 return False
-        files = repo.status(c.p1().node(), c.node())[field]
+        files = getattr(repo.status(c.p1().node(), c.node()), label)
         if fname is not None:
             if fname in files:
                 return True
         else:
-            for f in files:
-                if m(f):
-                    return True
+            if any(m(f) for f in files):
+                return True
 
     return subset.filter(matches, condrepr=(b'<status[%r] %r>', field, pat))
 
@@ -2026,9 +2025,7 @@
         dest = getstring(l[1], _(b"remote requires a repository path"))
     dest = repo.ui.expandpath(dest or b'default')
     dest, branches = hg.parseurl(dest)
-    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
-    if revs:
-        revs = [repo.lookup(rev) for rev in revs]
+
     other = hg.peer(repo, {}, dest)
     n = other.lookup(q)
     if n in repo:
@@ -2406,10 +2403,10 @@
     filtering.
     """
     cl = repo.unfiltered().changelog
-    torev = cl.rev
+    torev = cl.index.get_rev
     tonode = cl.node
-    nodemap = cl.nodemap
-    result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
+    result = set(torev(n) for n in f(tonode(r) for r in s))
+    result.discard(None)
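+    # index.get_rev() returns None for nodes the changelog does not know
+    # about, so a single discard replaces the old membership test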
     return smartset.baseset(result - repo.changelog.filteredrevs)
 
 
--- a/mercurial/scmposix.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/scmposix.py	Tue Jan 21 13:14:51 2020 -0500
@@ -84,13 +84,13 @@
             if not os.isatty(fd):
                 continue
             arri = fcntl.ioctl(fd, TIOCGWINSZ, b'\0' * 8)
-            height, width = array.array(r'h', arri)[:2]
+            height, width = array.array('h', arri)[:2]
             if width > 0 and height > 0:
                 return width, height
         except ValueError:
             pass
         except IOError as e:
-            if e[0] == errno.EINVAL:
+            if e[0] == errno.EINVAL:  # pytype: disable=unsupported-operands
                 pass
             else:
                 raise
--- a/mercurial/scmutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/scmutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,7 +9,6 @@
 
 import errno
 import glob
-import hashlib
 import os
 import posixpath
 import re
@@ -27,7 +26,7 @@
     wdirrev,
 )
 from .pycompat import getattr
-
+from .thirdparty import attr
 from . import (
     copies as copiesmod,
     encoding,
@@ -48,6 +47,7 @@
 )
 
 from .utils import (
+    hashutil,
     procutil,
     stringutil,
 )
@@ -57,63 +57,38 @@
 else:
     from . import scmposix as scmplatform
 
-parsers = policy.importmod(r'parsers')
+parsers = policy.importmod('parsers')
+rustrevlog = policy.importrust('revlog')
 
 termsize = scmplatform.termsize
 
 
-class status(tuple):
-    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
-       and 'ignored' properties are only relevant to the working copy.
+@attr.s(slots=True, repr=False)
+class status(object):
+    '''Struct with a list of files per status.
+
+    The 'deleted', 'unknown' and 'ignored' properties are only
+    relevant to the working copy.
     '''
 
-    __slots__ = ()
-
-    def __new__(
-        cls, modified, added, removed, deleted, unknown, ignored, clean
-    ):
-        return tuple.__new__(
-            cls, (modified, added, removed, deleted, unknown, ignored, clean)
-        )
-
-    @property
-    def modified(self):
-        '''files that have been modified'''
-        return self[0]
-
-    @property
-    def added(self):
-        '''files that have been added'''
-        return self[1]
-
-    @property
-    def removed(self):
-        '''files that have been removed'''
-        return self[2]
+    modified = attr.ib(default=attr.Factory(list))
+    added = attr.ib(default=attr.Factory(list))
+    removed = attr.ib(default=attr.Factory(list))
+    deleted = attr.ib(default=attr.Factory(list))
+    unknown = attr.ib(default=attr.Factory(list))
+    ignored = attr.ib(default=attr.Factory(list))
+    clean = attr.ib(default=attr.Factory(list))
 
-    @property
-    def deleted(self):
-        '''files that are in the dirstate, but have been deleted from the
-           working copy (aka "missing")
-        '''
-        return self[3]
-
-    @property
-    def unknown(self):
-        '''files not in the dirstate that are not ignored'''
-        return self[4]
+    def __iter__(self):
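+        # yield the fields in the position order of the old tuple-based
+        # class, so callers unpacking seven values keep working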
+        yield self.modified
+        yield self.added
+        yield self.removed
+        yield self.deleted
+        yield self.unknown
+        yield self.ignored
+        yield self.clean
 
-    @property
-    def ignored(self):
-        '''files not in the dirstate that are ignored (by _dirignore())'''
-        return self[5]
-
-    @property
-    def clean(self):
-        '''files that have not been modified'''
-        return self[6]
-
-    def __repr__(self, *args, **kwargs):
+    def __repr__(self):
         return (
             r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
             r'unknown=%s, ignored=%s, clean=%s>'
@@ -391,7 +366,7 @@
     key = None
     revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
     if revs:
-        s = hashlib.sha1()
+        s = hashutil.sha1()
         for rev in revs:
             s.update(b'%d;' % rev)
         key = s.digest()
@@ -571,12 +546,14 @@
             if cache is not None:
                 nodetree = cache.get(b'disambiguationnodetree')
             if not nodetree:
-                try:
-                    nodetree = parsers.nodetree(cl.index, len(revs))
-                except AttributeError:
-                    # no native nodetree
-                    pass
-                else:
+                if util.safehasattr(parsers, 'nodetree'):
+                    # The CExt is the only implementation to provide a nodetree
+                    # class so far.
+                    index = cl.index
+                    if util.safehasattr(index, 'get_cindex'):
+                        # the rust wrapper needs to give access to its internal index
+                        index = index.get_cindex()
+                    nodetree = parsers.nodetree(index, len(revs))
                     for r in revs:
                         nodetree.insert(r)
                     if cache is not None:
@@ -771,7 +748,7 @@
 
     Specifying a single revset is allowed.
 
-    Returns a ``revset.abstractsmartset`` which is a list-like interface over
+    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
     integer revisions.
     """
     allspecs = []
@@ -964,7 +941,7 @@
         ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
 
         # Remove any files that conflict with the backup file's path
-        for f in reversed(list(util.finddirs(filepath))):
+        for f in reversed(list(pathutil.finddirs(filepath))):
             if origvfs.isfileorlink(f):
                 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                 origvfs.unlink(f)
@@ -1454,8 +1431,8 @@
     """
     oldctx = repo[b'.']
     ds = repo.dirstate
+    copies = dict(ds.copies())
     ds.setparents(newctx.node(), nullid)
-    copies = dict(ds.copies())
     s = newctx.status(oldctx, match=match)
     for f in s.modified:
         if ds[f] == b'r':
@@ -1489,6 +1466,7 @@
         if src not in newctx or dst in newctx or ds[dst] != b'a':
             src = None
         ds.copy(src, dst)
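+    # setparents() above moved the working directory parent, so drop any
+    # cached quick-access changeid lookups (e.g. b'.') before returning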
+    repo._quick_access_changeid_invalidate()
 
 
 def writerequires(opener, requirements):
@@ -1783,6 +1761,7 @@
         self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
 
     def _printdebug(self, item):
+        unit = b''
         if self.unit:
             unit = b' ' + self.unit
         if item:
@@ -1943,6 +1922,7 @@
         def wrapped(tr):
             repo = reporef()
             if filtername:
+                assert repo is not None  # help pytype
                 repo = repo.filtered(filtername)
             func(repo, tr)
 
@@ -1962,6 +1942,7 @@
             if cgheads:
                 htext = _(b" (%+d heads)") % cgheads
             msg = _(b"added %d changesets with %d changes to %d files%s\n")
+            assert repo is not None  # help pytype
             repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
 
     if txmatch(_reportobsoletedsource):
--- a/mercurial/scmwindows.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/scmwindows.py	Tue Jan 21 13:14:51 2020 -0500
@@ -10,11 +10,12 @@
 )
 
 try:
-    import _winreg as winreg
+    import _winreg as winreg  # pytype: disable=import-error
 
     winreg.CloseKey
 except ImportError:
-    import winreg
+    # py3 only
+    import winreg  # pytype: disable=import-error
 
 # MS-DOS 'more' is the only pager available by default on Windows.
 fallbackpager = b'more'
@@ -27,26 +28,41 @@
     # Use mercurial.ini found in directory with hg.exe
     progrc = os.path.join(os.path.dirname(filename), b'mercurial.ini')
     rcpath.append(progrc)
+
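+    # helper shared by the hgrc.d, PROGRAMDATA, and registry scans below:
+    # collect every *.rc file from the given directory, if it exists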
+    def _processdir(progrcd):
+        if os.path.isdir(progrcd):
+            for f, kind in util.listdir(progrcd):
+                if f.endswith(b'.rc'):
+                    rcpath.append(os.path.join(progrcd, f))
+
     # Use hgrc.d found in directory with hg.exe
-    progrcd = os.path.join(os.path.dirname(filename), b'hgrc.d')
-    if os.path.isdir(progrcd):
-        for f, kind in util.listdir(progrcd):
-            if f.endswith(b'.rc'):
-                rcpath.append(os.path.join(progrcd, f))
-    # else look for a system rcpath in the registry
+    _processdir(os.path.join(os.path.dirname(filename), b'hgrc.d'))
+
+    # treat a PROGRAMDATA directory as equivalent to /etc/mercurial
+    programdata = encoding.environ.get(b'PROGRAMDATA')
+    if programdata:
+        programdata = os.path.join(programdata, b'Mercurial')
+        _processdir(os.path.join(programdata, b'hgrc.d'))
+
+        ini = os.path.join(programdata, b'mercurial.ini')
+        if os.path.isfile(ini):
+            rcpath.append(ini)
+
+        ini = os.path.join(programdata, b'hgrc')
+        if os.path.isfile(ini):
+            rcpath.append(ini)
+
+    # next look for a system rcpath in the registry
     value = util.lookupreg(
         b'SOFTWARE\\Mercurial', None, winreg.HKEY_LOCAL_MACHINE
     )
-    if not isinstance(value, bytes) or not value:
-        return rcpath
-    value = util.localpath(value)
-    for p in value.split(pycompat.ospathsep):
-        if p.lower().endswith(b'mercurial.ini'):
-            rcpath.append(p)
-        elif os.path.isdir(p):
-            for f, kind in util.listdir(p):
-                if f.endswith(b'.rc'):
-                    rcpath.append(os.path.join(p, f))
+    if value and isinstance(value, bytes):
+        value = util.localpath(value)
+        for p in value.split(pycompat.ospathsep):
+            if p.lower().endswith(b'mercurial.ini'):
+                rcpath.append(p)
+            else:
+                _processdir(p)
     return rcpath
 
 
--- a/mercurial/setdiscovery.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/setdiscovery.py	Tue Jan 21 13:14:51 2020 -0500
@@ -278,7 +278,7 @@
 
 
 partialdiscovery = policy.importrust(
-    r'discovery', member=r'PartialDiscovery', default=partialdiscovery
+    'discovery', member='PartialDiscovery', default=partialdiscovery
 )
 
 
--- a/mercurial/shelve.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/shelve.py	Tue Jan 21 13:14:51 2020 -0500
@@ -557,8 +557,6 @@
         match = scmutil.matchfiles(repo, repo[node].files())
         _shelvecreatedcommit(repo, node, name, match)
 
-        if ui.formatted():
-            desc = stringutil.ellipsis(desc, ui.termwidth())
         ui.status(_(b'shelved as %s\n') % name)
         if opts[b'keep']:
             with repo.dirstate.parentchange():
--- a/mercurial/simplemerge.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/simplemerge.py	Tue Jan 21 13:14:51 2020 -0500
@@ -291,7 +291,19 @@
             if region[0] != b"conflict":
                 yield region
                 continue
-            issue, z1, z2, a1, a2, b1, b2 = region
+            # pytype thinks this tuple contains only 3 things, but
+            # that's clearly not true because this code successfully
+            # executes. It might be wise to rework merge_regions to be
+            # some kind of attrs type.
+            (
+                issue,
+                z1,
+                z2,
+                a1,
+                a2,
+                b1,
+                b2,
+            ) = region  # pytype: disable=bad-unpacking
             alen = a2 - a1
             blen = b2 - b1
 
--- a/mercurial/smartset.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/smartset.py	Tue Jan 21 13:14:51 2020 -0500
@@ -256,7 +256,7 @@
     @util.propertycache
     def _list(self):
         # _list is only lazily constructed if we have _set
-        assert r'_set' in self.__dict__
+        assert '_set' in self.__dict__
         return list(self._set)
 
     def __iter__(self):
@@ -294,7 +294,7 @@
         self._istopo = False
 
     def __len__(self):
-        if r'_list' in self.__dict__:
+        if '_list' in self.__dict__:
             return len(self._list)
         else:
             return len(self._set)
@@ -347,8 +347,8 @@
         # try to use native set operations as fast paths
         if (
             type(other) is baseset
-            and r'_set' in other.__dict__
-            and r'_set' in self.__dict__
+            and '_set' in other.__dict__
+            and '_set' in self.__dict__
             and self._ascending is not None
         ):
             s = baseset(
--- a/mercurial/sparse.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/sparse.py	Tue Jan 21 13:14:51 2020 -0500
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import hashlib
 import os
 
 from .i18n import _
@@ -24,6 +23,7 @@
     scmutil,
     util,
 )
+from .utils import hashutil
 
 # Whether sparse features are enabled. This variable is intended to be
 # temporary to facilitate porting sparse to core. It should eventually be
@@ -205,12 +205,12 @@
         tempsignature = b'0'
 
     if signature is None or (includetemp and tempsignature is None):
-        signature = hex(hashlib.sha1(repo.vfs.tryread(b'sparse')).digest())
+        signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
         cache[b'signature'] = signature
 
         if includetemp:
             raw = repo.vfs.tryread(b'tempsparse')
-            tempsignature = hex(hashlib.sha1(raw).digest())
+            tempsignature = hex(hashutil.sha1(raw).digest())
             cache[b'tempsignature'] = tempsignature
 
     return b'%s %s' % (signature, tempsignature)
--- a/mercurial/sslutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/sslutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -24,7 +24,8 @@
     util,
 )
 from .utils import (
-    procutil,
+    hashutil,
+    resourceutil,
     stringutil,
 )
 
@@ -103,13 +104,13 @@
             # in this legacy code since we don't support SNI.
 
             args = {
-                r'keyfile': self._keyfile,
-                r'certfile': self._certfile,
-                r'server_side': server_side,
-                r'cert_reqs': self.verify_mode,
-                r'ssl_version': self.protocol,
-                r'ca_certs': self._cacerts,
-                r'ciphers': self._ciphers,
+                'keyfile': self._keyfile,
+                'certfile': self._certfile,
+                'server_side': server_side,
+                'cert_reqs': self.verify_mode,
+                'ssl_version': self.protocol,
+                'ca_certs': self._cacerts,
+                'ciphers': self._ciphers,
             }
 
             return ssl.wrap_socket(socket, **args)
@@ -499,7 +500,7 @@
             # outright. Hopefully the reason for this error is that we require
             # TLS 1.1+ and the server only supports TLS 1.0. Whatever the
             # reason, try to emit an actionable warning.
-            if e.reason == r'UNSUPPORTED_PROTOCOL':
+            if e.reason == 'UNSUPPORTED_PROTOCOL':
                 # We attempted TLS 1.0+.
                 if settings[b'protocolui'] == b'tls1.0':
                     # We support more than just TLS 1.0+. If this happens,
@@ -568,9 +569,7 @@
                         )
                     )
 
-            elif (
-                e.reason == r'CERTIFICATE_VERIFY_FAILED' and pycompat.iswindows
-            ):
+            elif e.reason == 'CERTIFICATE_VERIFY_FAILED' and pycompat.iswindows:
 
                 ui.warn(
                     _(
@@ -737,9 +736,9 @@
         return _(b'no certificate received')
 
     dnsnames = []
-    san = cert.get(r'subjectAltName', [])
+    san = cert.get('subjectAltName', [])
     for key, value in san:
-        if key == r'DNS':
+        if key == 'DNS':
             try:
                 if _dnsnamematch(value, hostname):
                     return
@@ -750,11 +749,11 @@
 
     if not dnsnames:
         # The subject is only checked when there is no DNS in subjectAltName.
-        for sub in cert.get(r'subject', []):
+        for sub in cert.get('subject', []):
             for key, value in sub:
                 # According to RFC 2818 the most specific Common Name must
                 # be used.
-                if key == r'commonName':
+                if key == 'commonName':
                     # 'subject' entries are unicode.
                     try:
                         value = value.encode('ascii')
@@ -788,7 +787,7 @@
     """
     if (
         not pycompat.isdarwin
-        or procutil.mainfrozen()
+        or resourceutil.mainfrozen()
         or not pycompat.sysexecutable
     ):
         return False
@@ -951,7 +950,7 @@
     # If a certificate fingerprint is pinned, use it and only it to
     # validate the remote cert.
     peerfingerprints = {
-        b'sha1': node.hex(hashlib.sha1(peercert).digest()),
+        b'sha1': node.hex(hashutil.sha1(peercert).digest()),
         b'sha256': node.hex(hashlib.sha256(peercert).digest()),
         b'sha512': node.hex(hashlib.sha512(peercert).digest()),
     }
--- a/mercurial/state.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/state.py	Tue Jan 21 13:14:51 2020 -0500
@@ -23,10 +23,20 @@
 
 from . import (
     error,
+    pycompat,
     util,
 )
 from .utils import cborutil
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Any,
+        Dict,
+    )
+
+    for t in (Any, Dict):
+        assert t
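+    # the no-op loop above references the imported names so that static
+    # checkers (e.g. pyflakes) do not flag them as unused; they only
+    # appear in type comments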
+
 
 class cmdstate(object):
     """a wrapper class to store the state of commands like `rebase`, `graft`,
@@ -50,6 +60,7 @@
         self.fname = fname
 
     def read(self):
+        # type: () -> Dict[bytes, Any]
         """read the existing state file and return a dict of data stored"""
         return self._read()
 
--- a/mercurial/statichttprepo.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/statichttprepo.py	Tue Jan 21 13:14:51 2020 -0500
@@ -53,7 +53,7 @@
         if bytes:
             end = self.pos + bytes - 1
         if self.pos or end:
-            req.add_header(r'Range', r'bytes=%d-%s' % (self.pos, end))
+            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
 
         try:
             f = self.opener.open(req)
--- a/mercurial/statprof.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/statprof.py	Tue Jan 21 13:14:51 2020 -0500
@@ -126,20 +126,20 @@
 __all__ = [b'start', b'stop', b'reset', b'display', b'profile']
 
 skips = {
-    r"util.py:check",
-    r"extensions.py:closure",
-    r"color.py:colorcmd",
-    r"dispatch.py:checkargs",
-    r"dispatch.py:<lambda>",
-    r"dispatch.py:_runcatch",
-    r"dispatch.py:_dispatch",
-    r"dispatch.py:_runcommand",
-    r"pager.py:pagecmd",
-    r"dispatch.py:run",
-    r"dispatch.py:dispatch",
-    r"dispatch.py:runcommand",
-    r"hg.py:<module>",
-    r"evolve.py:warnobserrors",
+    "util.py:check",
+    "extensions.py:closure",
+    "color.py:colorcmd",
+    "dispatch.py:checkargs",
+    "dispatch.py:<lambda>",
+    "dispatch.py:_runcatch",
+    "dispatch.py:_dispatch",
+    "dispatch.py:_runcommand",
+    "pager.py:pagecmd",
+    "dispatch.py:run",
+    "dispatch.py:dispatch",
+    "dispatch.py:runcommand",
+    "hg.py:<module>",
+    "evolve.py:warnobserrors",
 }
 
 ###########################################################################
@@ -206,7 +206,7 @@
 class CodeSite(object):
     cache = {}
 
-    __slots__ = (r'path', r'lineno', r'function', r'source')
+    __slots__ = ('path', 'lineno', 'function', 'source')
 
     def __init__(self, path, lineno, function):
         assert isinstance(path, bytes)
@@ -258,11 +258,11 @@
         return os.path.basename(self.path)
 
     def skipname(self):
-        return r'%s:%s' % (self.filename(), self.function)
+        return '%s:%s' % (self.filename(), self.function)
 
 
 class Sample(object):
-    __slots__ = (r'stack', r'time')
+    __slots__ = ('stack', 'time')
 
     def __init__(self, stack, time):
         self.stack = stack
@@ -352,7 +352,7 @@
             frame = inspect.currentframe()
             tid = [k for k, f in sys._current_frames().items() if f == frame][0]
             state.thread = threading.Thread(
-                target=samplerthread, args=(tid,), name=b"samplerthread"
+                target=samplerthread, args=(tid,), name="samplerthread"
             )
             state.thread.start()
 
@@ -738,7 +738,7 @@
     for sample in data.samples:
         root.add(sample.stack[::-1], sample.time - lasttime)
         lasttime = sample.time
-    showtime = kwargs.get(r'showtime', True)
+    showtime = kwargs.get('showtime', True)
 
     def _write(node, depth, multiple_siblings):
         site = node.site
@@ -894,7 +894,7 @@
         parent = stackid(stack[1:])
         myid = len(stack2id)
         stack2id[stack] = myid
-        id2stack.append(dict(category=stack[0][0], name=r'%s %s' % stack[0]))
+        id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0]))
         if parent is not None:
             id2stack[-1].update(parent=parent)
         return myid
@@ -931,7 +931,7 @@
             sampletime = max(oldtime + clamp, sample.time)
             samples.append(
                 dict(
-                    ph=r'E',
+                    ph='E',
                     name=oldfunc,
                     cat=oldcat,
                     sf=oldsid,
@@ -949,7 +949,7 @@
         stack = tuple(
             (
                 (
-                    r'%s:%d'
+                    '%s:%d'
                     % (simplifypath(pycompat.sysstr(frame.path)), frame.lineno),
                     pycompat.sysstr(frame.function),
                 )
@@ -971,7 +971,7 @@
             sid = stackid(tuple(laststack))
             samples.append(
                 dict(
-                    ph=r'B',
+                    ph='B',
                     name=name,
                     cat=path,
                     ts=sample.time * 1e6,
@@ -1030,17 +1030,17 @@
 
     optstart = 2
     displayargs[b'function'] = None
-    if argv[1] == r'hotpath':
+    if argv[1] == 'hotpath':
         displayargs[b'format'] = DisplayFormats.Hotpath
-    elif argv[1] == r'lines':
+    elif argv[1] == 'lines':
         displayargs[b'format'] = DisplayFormats.ByLine
-    elif argv[1] == r'functions':
+    elif argv[1] == 'functions':
         displayargs[b'format'] = DisplayFormats.ByMethod
-    elif argv[1] == r'function':
+    elif argv[1] == 'function':
         displayargs[b'format'] = DisplayFormats.AboutMethod
         displayargs[b'function'] = argv[2]
         optstart = 3
-    elif argv[1] == r'flame':
+    elif argv[1] == 'flame':
         displayargs[b'format'] = DisplayFormats.FlameGraph
     else:
         printusage()
@@ -1061,22 +1061,22 @@
     displayargs[b'limit'] = 0.05
     path = None
     for o, value in opts:
-        if o in (r"-l", r"--limit"):
+        if o in ("-l", "--limit"):
             displayargs[b'limit'] = float(value)
-        elif o in (r"-f", r"--file"):
+        elif o in ("-f", "--file"):
             path = value
-        elif o in (r"-o", r"--output-file"):
+        elif o in ("-o", "--output-file"):
             displayargs[b'outputfile'] = value
-        elif o in (r"-p", r"--script-path"):
+        elif o in ("-p", "--script-path"):
             displayargs[b'scriptpath'] = value
-        elif o in (r"-h", r"help"):
+        elif o in ("-h", "help"):
             printusage()
             return 0
         else:
             assert False, b"unhandled option %s" % o
 
     if not path:
-        print(r'must specify --file to load')
+        print('must specify --file to load')
         return 1
 
     load_data(path=path)
@@ -1086,5 +1086,5 @@
     return 0
 
 
-if __name__ == r"__main__":
+if __name__ == "__main__":
     sys.exit(main())
--- a/mercurial/store.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/store.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,7 +9,6 @@
 
 import errno
 import functools
-import hashlib
 import os
 import stat
 
@@ -25,8 +24,9 @@
     util,
     vfs as vfsmod,
 )
+from .utils import hashutil
 
-parsers = policy.importmod(r'parsers')
+parsers = policy.importmod('parsers')
 # how much bytes should be read from fncache in one read
 # It is done to prevent loading large fncache files into memory
 fncache_chunksize = 10 ** 6
@@ -273,7 +273,7 @@
 
 
 def _hashencode(path, dotencode):
-    digest = node.hex(hashlib.sha1(path).digest())
+    digest = node.hex(hashutil.sha1(path).digest())
     le = lowerencode(path[5:]).split(b'/')  # skips prefix 'data/' or 'meta/'
     parts = _auxencode(le, dotencode)
     basename = parts[-1]
--- a/mercurial/subrepo.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/subrepo.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,7 +9,6 @@
 
 import copy
 import errno
-import hashlib
 import os
 import re
 import stat
@@ -19,7 +18,6 @@
 import xml.dom.minidom
 
 from .i18n import _
-from .pycompat import open
 from . import (
     cmdutil,
     encoding,
@@ -38,6 +36,7 @@
 )
 from .utils import (
     dateutil,
+    hashutil,
     procutil,
     stringutil,
 )
@@ -62,15 +61,15 @@
 
 def _getstorehashcachename(remotepath):
     '''get a unique filename for the store hash cache of a remote repository'''
-    return node.hex(hashlib.sha1(_expandedabspath(remotepath)).digest())[0:12]
+    return node.hex(hashutil.sha1(_expandedabspath(remotepath)).digest())[0:12]
 
 
 class SubrepoAbort(error.Abort):
     """Exception class used to avoid handling a subrepo error more than once"""
 
     def __init__(self, *args, **kw):
-        self.subrepo = kw.pop(r'subrepo', None)
-        self.cause = kw.pop(r'cause', None)
+        self.subrepo = kw.pop('subrepo', None)
+        self.cause = kw.pop('cause', None)
         error.Abort.__init__(self, *args, **kw)
 
 
@@ -356,7 +355,7 @@
         """return file flags"""
         return b''
 
-    def matchfileset(self, expr, badfn=None):
+    def matchfileset(self, cwd, expr, badfn=None):
         """Resolve the fileset expression for this repo"""
         return matchmod.never(badfn=badfn)
 
@@ -430,10 +429,12 @@
         convert this repository from shared to normal storage.
         '''
 
-    def verify(self):
-        '''verify the integrity of the repository.  Return 0 on success or
-        warning, 1 on any error.
-        '''
+    def verify(self, onpush=False):
+        """verify the revision of this repository that is held in `_state` is
+        present and not hidden.  Return 0 on success or warning, 1 on any
+        error.  In the case of ``onpush``, warnings or errors will raise an
+        exception if the result of pushing would be a broken remote repository.
+        """
         return 0
 
     @propertycache
@@ -513,7 +514,7 @@
         yield b'# %s\n' % _expandedabspath(remotepath)
         vfs = self._repo.vfs
         for relname in filelist:
-            filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest())
+            filehash = node.hex(hashutil.sha1(vfs.tryread(relname)).digest())
             yield b'%s = %s\n' % (relname, filehash)
 
     @propertycache
@@ -895,20 +896,20 @@
         return cmdutil.files(ui, ctx, m, uipathfn, fm, fmt, subrepos)
 
     @annotatesubrepoerror
-    def matchfileset(self, expr, badfn=None):
+    def matchfileset(self, cwd, expr, badfn=None):
         if self._ctx.rev() is None:
             ctx = self._repo[None]
         else:
             rev = self._state[1]
             ctx = self._repo[rev]
 
-        matchers = [ctx.matchfileset(expr, badfn=badfn)]
+        matchers = [ctx.matchfileset(cwd, expr, badfn=badfn)]
 
         for subpath in ctx.substate:
             sub = ctx.sub(subpath)
 
             try:
-                sm = sub.matchfileset(expr, badfn=badfn)
+                sm = sub.matchfileset(cwd, expr, badfn=badfn)
                 pm = matchmod.prefixdirmatcher(subpath, sm, badfn=badfn)
                 matchers.append(pm)
             except error.LookupError:
@@ -969,24 +970,24 @@
         # 2. update the subrepo to the revision specified in
         #    the corresponding substate dictionary
         self.ui.status(_(b'reverting subrepo %s\n') % substate[0])
-        if not opts.get(r'no_backup'):
+        if not opts.get('no_backup'):
             # Revert all files on the subrepo, creating backups
             # Note that this will not recursively revert subrepos
             # We could do it if there was a set:subrepos() predicate
             opts = opts.copy()
-            opts[r'date'] = None
-            opts[r'rev'] = substate[1]
+            opts['date'] = None
+            opts['rev'] = substate[1]
 
             self.filerevert(*pats, **opts)
 
         # Update the repo to the revision specified in the given substate
-        if not opts.get(r'dry_run'):
+        if not opts.get('dry_run'):
             self.get(substate, overwrite=True)
 
     def filerevert(self, *pats, **opts):
-        ctx = self._repo[opts[r'rev']]
+        ctx = self._repo[opts['rev']]
         parents = self._repo.dirstate.parents()
-        if opts.get(r'all'):
+        if opts.get('all'):
             pats = [b'set:modified()']
         else:
             pats = []
@@ -1014,26 +1015,35 @@
 
         hg.unshare(self.ui, self._repo)
 
-    def verify(self):
+    def verify(self, onpush=False):
         try:
             rev = self._state[1]
             ctx = self._repo.unfiltered()[rev]
             if ctx.hidden():
                 # Since hidden revisions aren't pushed/pulled, it seems worth an
                 # explicit warning.
-                ui = self._repo.ui
-                ui.warn(
-                    _(b"subrepo '%s' is hidden in revision %s\n")
-                    % (self._relpath, node.short(self._ctx.node()))
+                msg = _(b"subrepo '%s' is hidden in revision %s") % (
+                    self._relpath,
+                    node.short(self._ctx.node()),
                 )
+
+                if onpush:
+                    raise error.Abort(msg)
+                else:
+                    self._repo.ui.warn(b'%s\n' % msg)
             return 0
         except error.RepoLookupError:
             # A missing subrepo revision may be a case of needing to pull it, so
-            # don't treat this as an error.
-            self._repo.ui.warn(
-                _(b"subrepo '%s' not found in revision %s\n")
-                % (self._relpath, node.short(self._ctx.node()))
+            # don't treat this as an error for `hg verify`.
+            msg = _(b"subrepo '%s' not found in revision %s") % (
+                self._relpath,
+                node.short(self._ctx.node()),
             )
+
+            if onpush:
+                raise error.Abort(msg)
+            else:
+                self._repo.ui.warn(b'%s\n' % msg)
             return 0
 
     @propertycache
@@ -1066,7 +1076,7 @@
         if not self.ui.interactive():
             # Making stdin be a pipe should prevent svn from behaving
             # interactively even if we can't pass --non-interactive.
-            extrakw[r'stdin'] = subprocess.PIPE
+            extrakw['stdin'] = subprocess.PIPE
             # Starting in svn 1.5 --non-interactive is a global flag
             # instead of being per-command, but we need to support 1.4 so
             # we have to be intelligent about what commands take
@@ -1125,14 +1135,14 @@
         # both. We used to store the working directory one.
         output, err = self._svncommand([b'info', b'--xml'])
         doc = xml.dom.minidom.parseString(output)
-        entries = doc.getElementsByTagName(r'entry')
+        entries = doc.getElementsByTagName('entry')
         lastrev, rev = b'0', b'0'
         if entries:
-            rev = pycompat.bytestr(entries[0].getAttribute(r'revision')) or b'0'
-            commits = entries[0].getElementsByTagName(r'commit')
+            rev = pycompat.bytestr(entries[0].getAttribute('revision')) or b'0'
+            commits = entries[0].getElementsByTagName('commit')
             if commits:
                 lastrev = (
-                    pycompat.bytestr(commits[0].getAttribute(r'revision'))
+                    pycompat.bytestr(commits[0].getAttribute('revision'))
                     or b'0'
                 )
         return (lastrev, rev)
@@ -1149,23 +1159,23 @@
         output, err = self._svncommand([b'status', b'--xml'])
         externals, changes, missing = [], [], []
         doc = xml.dom.minidom.parseString(output)
-        for e in doc.getElementsByTagName(r'entry'):
-            s = e.getElementsByTagName(r'wc-status')
+        for e in doc.getElementsByTagName('entry'):
+            s = e.getElementsByTagName('wc-status')
             if not s:
                 continue
-            item = s[0].getAttribute(r'item')
-            props = s[0].getAttribute(r'props')
-            path = e.getAttribute(r'path').encode('utf8')
-            if item == r'external':
+            item = s[0].getAttribute('item')
+            props = s[0].getAttribute('props')
+            path = e.getAttribute('path').encode('utf8')
+            if item == 'external':
                 externals.append(path)
-            elif item == r'missing':
+            elif item == 'missing':
                 missing.append(path)
             if item not in (
-                r'',
-                r'normal',
-                r'unversioned',
-                r'external',
-            ) or props not in (r'', r'none', r'normal'):
+                '',
+                'normal',
+                'unversioned',
+                'external',
+            ) or props not in ('', 'none', 'normal'):
                 changes.append(path)
         for path in changes:
             for ext in externals:
@@ -1291,13 +1301,13 @@
         output = self._svncommand([b'list', b'--recursive', b'--xml'])[0]
         doc = xml.dom.minidom.parseString(output)
         paths = []
-        for e in doc.getElementsByTagName(r'entry'):
-            kind = pycompat.bytestr(e.getAttribute(r'kind'))
+        for e in doc.getElementsByTagName('entry'):
+            kind = pycompat.bytestr(e.getAttribute('kind'))
             if kind != b'file':
                 continue
-            name = r''.join(
+            name = ''.join(
                 c.data
-                for c in e.getElementsByTagName(r'name')[0].childNodes
+                for c in e.getElementsByTagName('name')[0].childNodes
                 if c.nodeType == c.TEXT_NODE
             )
             paths.append(name.encode('utf8'))
@@ -1434,7 +1444,7 @@
         # which is mostly progress and useful info
         errpipe = None
         if self.ui.quiet:
-            errpipe = open(os.devnull, b'w')
+            errpipe = pycompat.open(os.devnull, b'w')
         if self.ui._colormode and len(commands) and commands[0] == b"diff":
             # insert the argument in the front,
             # the end of git diff arguments is used for paths
@@ -1528,7 +1538,7 @@
         return branch2rev, rev2branch
 
     def _gittracking(self, branches):
-        b'return map of remote branch to local tracking branch'
+        """return map of remote branch to local tracking branch"""
         # assumes no more than one local tracking branch for each remote
         tracking = {}
         for b in branches:
@@ -1808,7 +1818,7 @@
                 if exact:
                     rejected.append(f)
                 continue
-            if not opts.get(r'dry_run'):
+            if not opts.get('dry_run'):
                 self._gitcommand(command + [f])
 
         for f in rejected:
@@ -1849,7 +1859,7 @@
         # This should be much faster than manually traversing the trees
         # and objects with many subprocess calls.
         tarstream = self._gitcommand([b'archive', revision], stream=True)
-        tar = tarfile.open(fileobj=tarstream, mode=r'r|')
+        tar = tarfile.open(fileobj=tarstream, mode='r|')
         relpath = subrelpath(self)
         progress = self.ui.makeprogress(
             _(b'archiving (%s)') % relpath, unit=_(b'files')
@@ -1918,9 +1928,9 @@
         deleted, unknown, ignored, clean = [], [], [], []
 
         command = [b'status', b'--porcelain', b'-z']
-        if opts.get(r'unknown'):
+        if opts.get('unknown'):
             command += [b'--untracked-files=all']
-        if opts.get(r'ignored'):
+        if opts.get('ignored'):
             command += [b'--ignored']
         out = self._gitcommand(command)
 
@@ -1948,7 +1958,7 @@
             elif st == b'!!':
                 ignored.append(filename1)
 
-        if opts.get(r'clean'):
+        if opts.get('clean'):
             out = self._gitcommand([b'ls-files'])
             for f in out.split(b'\n'):
                 if not f in changedfiles:
@@ -1962,7 +1972,7 @@
     def diff(self, ui, diffopts, node2, match, prefix, **opts):
         node1 = self._state[1]
         cmd = [b'diff', b'--no-renames']
-        if opts[r'stat']:
+        if opts['stat']:
             cmd.append(b'--stat')
         else:
             # for Git, this also implies '-p'
@@ -1995,8 +2005,12 @@
         if match.always():
             output += self._gitcommand(cmd) + b'\n'
         else:
-            st = self.status(node2)[:3]
-            files = [f for sublist in st for f in sublist]
+            st = self.status(node2)
+            files = [
+                f
+                for sublist in (st.modified, st.added, st.removed)
+                for f in sublist
+            ]
             for f in files:
                 if match(f):
                     output += self._gitcommand(cmd + [b'--', f]) + b'\n'
@@ -2007,7 +2021,7 @@
     @annotatesubrepoerror
     def revert(self, substate, *pats, **opts):
         self.ui.status(_(b'reverting subrepo %s\n') % substate[0])
-        if not opts.get(r'no_backup'):
+        if not opts.get('no_backup'):
             status = self.status(None)
             names = status.modified
             for name in names:
@@ -2023,7 +2037,7 @@
                 )
                 util.rename(self.wvfs.join(name), bakname)
 
-        if not opts.get(r'dry_run'):
+        if not opts.get('dry_run'):
             self.get(substate, overwrite=True)
         return []
 
--- a/mercurial/tags.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/tags.py	Tue Jan 21 13:14:51 2020 -0500
@@ -194,8 +194,8 @@
         return alltags
 
     for head in reversed(heads):  # oldest to newest
-        assert (
-            head in repo.changelog.nodemap
+        assert repo.changelog.index.has_node(
+            head
         ), b"tag cache returned bogus head %s" % short(head)
     fnodes = _filterfnodes(tagfnode, reversed(heads))
     alltags = _tagsfromfnodes(ui, repo, fnodes)
@@ -571,7 +571,17 @@
 
     if not local:
         m = matchmod.exact([b'.hgtags'])
-        if any(repo.status(match=m, unknown=True, ignored=True)):
+        st = repo.status(match=m, unknown=True, ignored=True)
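+        # scmutil.status is no longer a tuple, so spell out the file lists
+        # of interest instead of iterating the status object itself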
+        if any(
+            (
+                st.modified,
+                st.added,
+                st.removed,
+                st.deleted,
+                st.unknown,
+                st.ignored,
+            )
+        ):
             raise error.Abort(
                 _(b'working copy of .hgtags is changed'),
                 hint=_(b'please commit .hgtags manually'),
--- a/mercurial/templatefilters.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/templatefilters.py	Tue Jan 21 13:14:51 2020 -0500
@@ -299,7 +299,7 @@
     return dateutil.datestr(text, b'%Y-%m-%d %H:%M:%S %1%2')
 
 
-def indent(text, prefix):
+def indent(text, prefix, firstline=b''):
     '''indent each non-empty line of text after first with prefix.'''
     lines = text.splitlines()
     num_lines = len(lines)
@@ -308,8 +308,8 @@
     def indenter():
         for i in pycompat.xrange(num_lines):
             l = lines[i]
-            if i and l.strip():
-                yield prefix
+            if l.strip():
+                yield prefix if i else firstline
             yield l
             if i < num_lines - 1 or endswithnewline:
                 yield b'\n'
--- a/mercurial/templatefuncs.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/templatefuncs.py	Tue Jan 21 13:14:51 2020 -0500
@@ -310,13 +310,11 @@
     text = evalstring(context, mapping, args[0])
     indent = evalstring(context, mapping, args[1])
 
+    firstline = indent
     if len(args) == 3:
         firstline = evalstring(context, mapping, args[2])
-    else:
-        firstline = indent
 
-    # the indent function doesn't indent the first line, so we do it here
-    return templatefilters.indent(firstline + text, indent)
+    return templatefilters.indent(text, indent, firstline=firstline)
 
 
 @templatefunc(b'get(dict, key)')
--- a/mercurial/templater.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/templater.py	Tue Jan 21 13:14:51 2020 -0500
@@ -80,7 +80,10 @@
     templateutil,
     util,
 )
-from .utils import stringutil
+from .utils import (
+    resourceutil,
+    stringutil,
+)
 
 # template parsing
 
@@ -611,7 +614,7 @@
     return s[1:-1]
 
 
-class resourcemapper(object):
+class resourcemapper(object):  # pytype: disable=ignored-metaclass
     """Mapper of internal template resources"""
 
     __metaclass__ = abc.ABCMeta
@@ -1042,7 +1045,10 @@
 def templatepaths():
     '''return locations used for template files.'''
     pathsrel = [b'templates']
-    paths = [os.path.normpath(os.path.join(util.datapath, f)) for f in pathsrel]
+    paths = [
+        os.path.normpath(os.path.join(resourceutil.datapath, f))
+        for f in pathsrel
+    ]
     return [p for p in paths if os.path.isdir(p)]
 
 
--- a/mercurial/templates/json/map	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/templates/json/map	Tue Jan 21 13:14:51 2020 -0500
@@ -65,6 +65,8 @@
   "tags": [{join(changesettag, ", ")}],
   "user": {author|utf8|json},
   "parents": [{join(parent%changesetparent, ", ")}],
+  "files": [{join(files, ", ")}],
+  "diff": [{join(diff, ", ")}],
   "phase": {phase|json}
   }'
 changesetbranch = '{name|utf8|json}'
@@ -229,8 +231,11 @@
   "topic": {topic|utf8|json},
   "rawdoc": {doc|utf8|json}
   }'
-filenodelink = ''
-filenolink = ''
+filenodelink = '\{
+  "file": {file|json},
+  "status": {status|json}
+  }'
+filenolink = '{filenodelink}'
 index = '\{
   "entries": [{join(entries%indexentry, ", ")}]
   }'
--- a/mercurial/templateutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/templateutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -31,7 +31,7 @@
     pass
 
 
-class wrapped(object):
+class wrapped(object):  # pytype: disable=ignored-metaclass
     """Object requiring extra conversion prior to displaying or processing
     as value
 
@@ -108,9 +108,11 @@
         """
 
 
-class mappable(object):
+class mappable(object):  # pytype: disable=ignored-metaclass
     """Object which can be converted to a single template mapping"""
 
+    __metaclass__ = abc.ABCMeta
+
     def itermaps(self, context):
         yield self.tomap(context)
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/testing/revlog.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,38 @@
+from __future__ import absolute_import
+import unittest
+
+# picked from test-parse-index2, copied rather than imported
+# so that it stays stable even if test-parse-index2 changes or disappears.
+data_non_inlined = (
+    b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01D\x19'
+    b'\x00\x07e\x12\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff'
+    b'\xff\xff\xff\xff\xd1\xf4\xbb\xb0\xbe\xfc\x13\xbd\x8c\xd3\x9d'
+    b'\x0f\xcd\xd9;\x8c\x07\x8cJ/\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+    b'\x00\x00\x00\x00\x00\x00\x01D\x19\x00\x00\x00\x00\x00\xdf\x00'
+    b'\x00\x01q\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff'
+    b'\xff\xff\xff\xc1\x12\xb9\x04\x96\xa4Z1t\x91\xdfsJ\x90\xf0\x9bh'
+    b'\x07l&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+    b'\x00\x01D\xf8\x00\x00\x00\x00\x01\x1b\x00\x00\x01\xb8\x00\x00'
+    b'\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\x02\n'
+    b'\x0e\xc6&\xa1\x92\xae6\x0b\x02i\xfe-\xe5\xbao\x05\xd1\xe7\x00'
+    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01F'
+    b'\x13\x00\x00\x00\x00\x01\xec\x00\x00\x03\x06\x00\x00\x00\x01'
+    b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
+    b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
+    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+)
+
+
+try:
+    from ..cext import parsers as cparsers
+except ImportError:
+    cparsers = None
+
+
+@unittest.skipIf(
+    cparsers is None,
+    'The C version of the "parsers" module is not available. It is needed for this test.',
+)
+class RevlogBasedTestBase(unittest.TestCase):
+    def parseindex(self):
+        return cparsers.parse_index2(data_non_inlined, False)[0]
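+
+    # Subclasses call parseindex() from their tests to obtain a parsed C
+    # index over the sample data above; a hypothetical example:
+    #
+    #     class MyIndexTest(RevlogBasedTestBase):
+    #         def testparse(self):
+    #             idx = self.parseindex()
+    #             # idx behaves like a revlog's .index object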
--- a/mercurial/testing/storage.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/testing/storage.py	Tue Jan 21 13:14:51 2020 -0500
@@ -964,7 +964,7 @@
         with self.assertRaises(error.StorageError):
             f.rawdata(node1)
 
-    def testbadnoderevisionraw(self):
+    def testbadnoderevision(self):
         # Like above except we test read() first to isolate revision caching
         # behavior.
         f = self._makefilefn()
@@ -1366,26 +1366,26 @@
     should find and run it automatically.
     """
     d = {
-        r'_makefilefn': makefilefn,
-        r'_maketransactionfn': maketransactionfn,
-        r'_addrawrevisionfn': addrawrevisionfn,
+        '_makefilefn': makefilefn,
+        '_maketransactionfn': maketransactionfn,
+        '_addrawrevisionfn': addrawrevisionfn,
     }
-    return type(r'ifileindextests', (ifileindextests,), d)
+    return type('ifileindextests', (ifileindextests,), d)
 
 
 def makeifiledatatests(makefilefn, maketransactionfn, addrawrevisionfn):
     d = {
-        r'_makefilefn': makefilefn,
-        r'_maketransactionfn': maketransactionfn,
-        r'_addrawrevisionfn': addrawrevisionfn,
+        '_makefilefn': makefilefn,
+        '_maketransactionfn': maketransactionfn,
+        '_addrawrevisionfn': addrawrevisionfn,
     }
-    return type(r'ifiledatatests', (ifiledatatests,), d)
+    return type('ifiledatatests', (ifiledatatests,), d)
 
 
 def makeifilemutationtests(makefilefn, maketransactionfn, addrawrevisionfn):
     d = {
-        r'_makefilefn': makefilefn,
-        r'_maketransactionfn': maketransactionfn,
-        r'_addrawrevisionfn': addrawrevisionfn,
+        '_makefilefn': makefilefn,
+        '_maketransactionfn': maketransactionfn,
+        '_addrawrevisionfn': addrawrevisionfn,
     }
-    return type(r'ifilemutationtests', (ifilemutationtests,), d)
+    return type('ifilemutationtests', (ifilemutationtests,), d)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/sha1dc/LICENSE.txt	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,30 @@
+MIT License
+
+Copyright (c) 2017:
+    Marc Stevens
+    Cryptology Group
+    Centrum Wiskunde & Informatica
+    P.O. Box 94079, 1090 GB Amsterdam, Netherlands
+    marc@marc-stevens.nl
+
+    Dan Shumow
+    Microsoft Research
+    danshu@microsoft.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/sha1dc/README.md	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,145 @@
+# sha1collisiondetection
+Library and command line tool to detect SHA-1 collisions in files
+
+Copyright 2017 Marc Stevens <marc@marc-stevens.nl>
+
+Distributed under the MIT Software License.
+
+See accompanying file LICENSE.txt or copy at https://opensource.org/licenses/MIT.
+
+## Developers
+
+- Marc Stevens, CWI Amsterdam (https://marc-stevens.nl)
+- Dan Shumow, Microsoft Research (https://www.microsoft.com/en-us/research/people/danshu/)
+
+## About
+This library and command line tool were designed as near drop-in replacements for common SHA-1 libraries and sha1sum.
+They will compute the SHA-1 hash of any given file and additionally will detect cryptanalytic collision attacks against SHA-1 present in each file. It is very fast, taking less than twice as long as regular SHA-1.
+
+More specifically they will detect any cryptanalytic collision attack against SHA-1 using any of the top 32 SHA-1 disturbance vectors with probability 1:
+```
+    I(43,0), I(44,0), I(45,0), I(46,0), I(47,0), I(48,0), I(49,0), I(50,0), I(51,0), I(52,0),
+    I(46,2), I(47,2), I(48,2), I(49,2), I(50,2), I(51,2),
+    II(45,0), II(46,0), II(47,0), II(48,0), II(49,0), II(50,0), II(51,0), II(52,0), II(53,0), II(54,0), II(55,0), II(56,0),
+    II(46,2), II(49,2), II(50,2), II(51,2)
+```
+The possibility of false positives can be neglected as the probability is smaller than 2^-90.
+
+The library supports both an indicator flag that applications can check and act on, as well as a special _safe-hash_ mode that returns the real SHA-1 hash when no collision was detected and a different _safe_ hash when a collision was detected.
+Colliding files will have the same SHA-1 hash, but will have different unpredictable safe-hashes.
+This essentially enables protection of applications against SHA-1 collisions with no further changes in the application, e.g., digital signature forgeries based on SHA-1 collisions automatically become invalid.
+
+For the theoretical explanation of collision detection see the award-winning paper on _Counter-Cryptanalysis_:
+
+Counter-cryptanalysis, Marc Stevens, CRYPTO 2013, Lecture Notes in Computer Science, vol. 8042, Springer, 2013, pp. 129-146,
+https://marc-stevens.nl/research/papers/C13-S.pdf
+
+## Compiling
+
+Run:
+```
+make
+```
+
+## Command-line usage
+
+There are two programs `bin/sha1dcsum` and `bin/sha1dcsum_partialcoll`.
+The first program `bin/sha1dcsum` will detect and warn for files that were generated with a cryptanalytic SHA-1 collision attack like the one documented at https://shattered.io/.
+The second program `bin/sha1dcsum_partialcoll` will detect and warn for files that were generated with a cryptanalytic collision attack against reduced-round SHA-1 (of which there are a few examples so far).
+
+Examples:
+```
+bin/sha1dcsum test/sha1_reducedsha_coll.bin test/shattered-1.pdf
+bin/sha1dcsum_partialcoll test/sha1_reducedsha_coll.bin test/shattered-1.pdf
+pipe_data | bin/sha1dcsum -
+```
+
+## Library usage
+
+See the documentation in `lib/sha1.h`. Here is a simple example code snippet:
+```
+#include <sha1dc/sha1.h>
+
+SHA1_CTX ctx;
+unsigned char hash[20];
+SHA1DCInit(&ctx);
+
+/** disable safe-hash mode (safe-hash mode is enabled by default) **/
+// SHA1DCSetSafeHash(&ctx, 0);
+/** disable use of unavoidable attack conditions to speed up detection (enabled by default) **/
+// SHA1DCSetUseUBC(&ctx, 0); 
+
+SHA1DCUpdate(&ctx, buffer, (unsigned)(size));
+
+int iscoll = SHA1DCFinal(hash,&ctx);
+if (iscoll)
+    printf("collision detected");
+else
+    printf("no collision detected");
+```
+
+## Inclusion in other programs
+
+In order to make it easier to include these sources in other projects,
+there are several preprocessor macros that the code uses. Rather than
+copy/pasting and customizing or specializing the code, first see if
+setting any of these defines appropriately will allow you to avoid
+modifying the code yourself.
+
+- SHA1DC_NO_STANDARD_INCLUDES
+
+  Skips including standard headers. Use this if your project for
+  whatever reason wishes to do its own header includes.
+
+- SHA1DC_CUSTOM_INCLUDE_SHA1_C
+
+  Includes a custom header at the top of sha1.c. Usually this would be
+  set in conjunction with SHA1DC_NO_STANDARD_INCLUDES to point to a
+  header file which includes various standard headers.
+
+- SHA1DC_INIT_SAFE_HASH_DEFAULT
+
+  Sets the default for safe_hash in SHA1DCInit(). Valid values are 0
+  and 1. If unset 1 is the default.
+
+- SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_C
+
+  Includes a custom trailer in sha1.c. Useful for any extra utility
+  functions that make use of the functions already defined in sha1.c.
+
+- SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_H
+
+  Includes a custom trailer in sha1.h. Useful for defining the
+  prototypes of the functions or code included by
+  SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_C.
+
+- SHA1DC_CUSTOM_INCLUDE_UBC_CHECK_C
+
+  Includes a custom header at the top of ubc_check.c.
+
+- SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_C
+
+  Includes a custom trailer in ubc_check.c.
+
+- SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_H
+
+  Includes a custom trailer in ubc_check.h.
+
+This code will try to auto-detect certain things based on
+CPU/platform. Unless you're running on some really obscure CPU or
+porting to a new platform, you should not need to tweak this. If you
+do, please open an issue at
+https://github.com/cr-marcstevens/sha1collisiondetection
+
+- SHA1DC_FORCE_LITTLEENDIAN / SHA1DC_FORCE_BIGENDIAN
+
+  Overrides the check for processor endianness and forces either
+  Little-Endian or Big-Endian.
+
+- SHA1DC_FORCE_UNALIGNED_ACCESS
+
+  Permits unaligned access. This will fail on e.g. SPARC processors,
+  so it's only enabled on a whitelist of processors. If your CPU isn't
+  detected as allowing this but does allow unaligned access, setting
+  this may improve performance (or make it worse, if the kernel has to
+  catch and emulate such access on its own).
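+
+As a concrete sketch of the custom-include macros (using a hypothetical project-local header named `my_includes.h`, passed to the compiler via `-DSHA1DC_NO_STANDARD_INCLUDES -DSHA1DC_CUSTOM_INCLUDE_SHA1_C='"my_includes.h"'`):
+```
+/* my_includes.h -- hypothetical replacement for the standard headers,
+   pulled into the top of sha1.c via SHA1DC_CUSTOM_INCLUDE_SHA1_C when
+   SHA1DC_NO_STANDARD_INCLUDES suppresses the default includes. */
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+```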
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/sha1dc/cext.c	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,212 @@
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+#include "lib/sha1.h"
+
+#if PY_MAJOR_VERSION >= 3
+#define IS_PY3K
+#endif
+
+/* helper to switch things like string literal depending on Python version */
+#ifdef IS_PY3K
+#define PY23(py2, py3) py3
+#else
+#define PY23(py2, py3) py2
+#endif
+
+static char sha1dc_doc[] = "Efficient detection of SHA1 collision constructs.";
+
+/* clang-format off */
+typedef struct {
+	PyObject_HEAD
+	SHA1_CTX ctx;
+} pysha1ctx;
+/* clang-format on */
+
+static int pysha1ctx_init(pysha1ctx *self, PyObject *args)
+{
+	Py_buffer data;
+	data.obj = NULL;
+
+	SHA1DCInit(&(self->ctx));
+	/* We don't want "safe" sha1s, wherein sha1dc can give you a
+	   different hash for something that's trying to give you a
+	   collision. We just want to detect collisions.
+	 */
+	SHA1DCSetSafeHash(&(self->ctx), 0);
+	if (!PyArg_ParseTuple(args, PY23("|s*", "|y*"), &data)) {
+		return -1;
+	}
+	if (data.obj) {
+		if (!PyBuffer_IsContiguous(&data, 'C') || data.ndim > 1) {
+			PyErr_SetString(PyExc_BufferError,
+			                "buffer must be contiguous and single dimension");
+			PyBuffer_Release(&data);
+			return -1;
+		}
+
+		SHA1DCUpdate(&(self->ctx), data.buf, data.len);
+		PyBuffer_Release(&data);
+	}
+	return 0;
+}
+
+static void pysha1ctx_dealloc(pysha1ctx *self)
+{
+	PyObject_Del(self);
+}
+
+static PyObject *pysha1ctx_update(pysha1ctx *self, PyObject *args)
+{
+	Py_buffer data;
+	if (!PyArg_ParseTuple(args, PY23("s*", "y*"), &data)) {
+		return NULL;
+	}
+	if (!PyBuffer_IsContiguous(&data, 'C') || data.ndim > 1) {
+		PyErr_SetString(PyExc_BufferError,
+		                "buffer must be contiguous and single dimension");
+		PyBuffer_Release(&data);
+		return NULL;
+	}
+	SHA1DCUpdate(&(self->ctx), data.buf, data.len);
+	PyBuffer_Release(&data);
+	Py_RETURN_NONE;
+}
+
+/* It is intentional that this takes a ctx by value: passing by value
+   clones the context, so we can keep using .update() without
+   poisoning the state with padding.
+*/
+static int finalize(SHA1_CTX ctx, unsigned char *hash_out)
+{
+	if (SHA1DCFinal(hash_out, &ctx)) {
+		PyErr_SetString(PyExc_OverflowError,
+		                "sha1 collision attack detected");
+		return 0;
+	}
+	return 1;
+}
+
+static PyObject *pysha1ctx_digest(pysha1ctx *self)
+{
+	unsigned char hash[20];
+	if (!finalize(self->ctx, hash)) {
+		return NULL;
+	}
+	return PyBytes_FromStringAndSize((char *)hash, 20);
+}
+
+static PyObject *pysha1ctx_hexdigest(pysha1ctx *self)
+{
+	static const char hexdigit[] = "0123456789abcdef";
+	unsigned char hash[20];
+	char hexhash[40];
+	int i;
+	if (!finalize(self->ctx, hash)) {
+		return NULL;
+	}
+	for (i = 0; i < 20; ++i) {
+		hexhash[i * 2] = hexdigit[hash[i] >> 4];
+		hexhash[i * 2 + 1] = hexdigit[hash[i] & 15];
+	}
+	return PY23(PyString_FromStringAndSize, PyUnicode_FromStringAndSize)(hexhash, 40);
+}
+
+static PyTypeObject sha1ctxType;
+
+static PyObject *pysha1ctx_copy(pysha1ctx *self)
+{
+	pysha1ctx *clone = (pysha1ctx *)PyObject_New(pysha1ctx, &sha1ctxType);
+	if (!clone) {
+		return NULL;
+	}
+	clone->ctx = self->ctx;
+	return (PyObject *)clone;
+}
+
+static PyMethodDef pysha1ctx_methods[] = {
+    {"update", (PyCFunction)pysha1ctx_update, METH_VARARGS,
+     "Update this hash object's state with the provided bytes."},
+    {"digest", (PyCFunction)pysha1ctx_digest, METH_NOARGS,
+     "Return the digest value as a string of binary data."},
+    {"hexdigest", (PyCFunction)pysha1ctx_hexdigest, METH_NOARGS,
+     "Return the digest value as a string of hexadecimal digits."},
+    {"copy", (PyCFunction)pysha1ctx_copy, METH_NOARGS,
+     "Return a copy of the hash object."},
+    {NULL},
+};
+
+/* clang-format off */
+static PyTypeObject sha1ctxType = {
+	PyVarObject_HEAD_INIT(NULL, 0)                    /* header */
+	"sha1dc.sha1",                                    /* tp_name */
+	sizeof(pysha1ctx),                                /* tp_basicsize */
+	0,                                                /* tp_itemsize */
+	(destructor)pysha1ctx_dealloc,                    /* tp_dealloc */
+	0,                                                /* tp_print */
+	0,                                                /* tp_getattr */
+	0,                                                /* tp_setattr */
+	0,                                                /* tp_compare */
+	0,                                                /* tp_repr */
+	0,                                                /* tp_as_number */
+	0,                                                /* tp_as_sequence */
+	0,                                                /* tp_as_mapping */
+	0,                                                /* tp_hash */
+	0,                                                /* tp_call */
+	0,                                                /* tp_str */
+	0,                                                /* tp_getattro */
+	0,                                                /* tp_setattro */
+	0,                                                /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,                               /* tp_flags */
+	"sha1 implementation that looks for collisions",  /* tp_doc */
+	0,                                                /* tp_traverse */
+	0,                                                /* tp_clear */
+	0,                                                /* tp_richcompare */
+	0,                                                /* tp_weaklistoffset */
+	0,                                                /* tp_iter */
+	0,                                                /* tp_iternext */
+	pysha1ctx_methods,                                /* tp_methods */
+	0,                                                /* tp_members */
+	0,                                                /* tp_getset */
+	0,                                                /* tp_base */
+	0,                                                /* tp_dict */
+	0,                                                /* tp_descr_get */
+	0,                                                /* tp_descr_set */
+	0,                                                /* tp_dictoffset */
+	(initproc)pysha1ctx_init,                         /* tp_init */
+	0,                                                /* tp_alloc */
+};
+/* clang-format on */
+
+static PyMethodDef methods[] = {
+    {NULL, NULL},
+};
+
+static void module_init(PyObject *mod)
+{
+	sha1ctxType.tp_new = PyType_GenericNew;
+	if (PyType_Ready(&sha1ctxType) < 0) {
+		return;
+	}
+	Py_INCREF(&sha1ctxType);
+
+	PyModule_AddObject(mod, "sha1", (PyObject *)&sha1ctxType);
+}
+
+#ifdef IS_PY3K
+static struct PyModuleDef sha1dc_module = {PyModuleDef_HEAD_INIT, "sha1dc",
+                                           sha1dc_doc, -1, methods};
+
+PyMODINIT_FUNC PyInit_sha1dc(void)
+{
+	PyObject *mod = PyModule_Create(&sha1dc_module);
+	module_init(mod);
+	return mod;
+}
+#else
+PyMODINIT_FUNC initsha1dc(void)
+{
+	PyObject *mod = Py_InitModule3("sha1dc", methods, sha1dc_doc);
+	module_init(mod);
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/sha1dc/lib/sha1.c	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,1911 @@
+/***
+* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow (danshu@microsoft.com)
+* Distributed under the MIT Software License.
+* See accompanying file LICENSE.txt or copy at
+* https://opensource.org/licenses/MIT
+***/
+
+#ifndef SHA1DC_NO_STANDARD_INCLUDES
+#include <string.h>
+#include <memory.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef __unix__
+#include <sys/types.h> /* make sure macros like _BIG_ENDIAN visible */
+#endif
+#endif
+
+#ifdef SHA1DC_CUSTOM_INCLUDE_SHA1_C
+#include SHA1DC_CUSTOM_INCLUDE_SHA1_C
+#endif
+
+#ifndef SHA1DC_INIT_SAFE_HASH_DEFAULT
+#define SHA1DC_INIT_SAFE_HASH_DEFAULT 1
+#endif
+
+#include "sha1.h"
+#include "ubc_check.h"
+
+#if (defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || \
+     defined(i386) || defined(__i386) || defined(__i386__) || defined(__i486__)  || \
+     defined(__i586__) || defined(__i686__) || defined(_M_IX86) || defined(__X86__) || \
+     defined(_X86_) || defined(__THW_INTEL__) || defined(__I86__) || defined(__INTEL__) || \
+     defined(__386) || defined(_M_X64) || defined(_M_AMD64))
+#define SHA1DC_ON_INTEL_LIKE_PROCESSOR
+#endif
+
+/*
+   Because Little-Endian architectures are most common,
+   we only set SHA1DC_BIGENDIAN if one of these conditions is met.
+   Note that all MSFT platforms are little endian,
+   so none of these will be defined under the MSC compiler.
+   If you are compiling on a big endian platform and your compiler does not define one of these,
+   you will have to add whatever macros your tool chain defines to indicate Big-Endianness.
+ */
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__)
+/*
+ * Should detect Big Endian under GCC since at least 4.6.0 (gcc svn
+ * rev #165881). See
+ * https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+ *
+ * This also works under clang since 3.2, it copied the GCC-ism. See
+ * clang.git's 3b198a97d2 ("Preprocessor: add __BYTE_ORDER__
+ * predefined macro", 2012-07-27)
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define SHA1DC_BIGENDIAN
+#endif
+
+/* Not under GCC-alike */
+#elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN)
+/*
+ * Should detect Big Endian under glibc.git since 14245eb70e ("entered
+ * into RCS", 1992-11-25). Defined in <endian.h> which will have been
+ * brought in by standard headers. See glibc.git and
+ * https://sourceforge.net/p/predef/wiki/Endianness/
+ */
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define SHA1DC_BIGENDIAN
+#endif
+
+/* Not under GCC-alike or glibc */
+#elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN)
+/*
+ * *BSD and newlib (embedded Linux, cygwin, etc).
+ * the defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN) part prevents
+ * this condition from matching with Solaris/sparc.
+ * (Solaris defines only one endian macro)
+ */
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define SHA1DC_BIGENDIAN
+#endif
+
+/* Not under GCC-alike or glibc or *BSD or newlib */
+#elif (defined(__ARMEB__) || defined(__THUMBEB__) || defined(__AARCH64EB__) || \
+       defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || \
+       defined(__sparc))
+/*
+ * Should define Big Endian for a whitelist of known processors. See
+ * https://sourceforge.net/p/predef/wiki/Endianness/ and
+ * http://www.oracle.com/technetwork/server-storage/solaris/portingtosolaris-138514.html
+ */
+#define SHA1DC_BIGENDIAN
+
+/* Not under GCC-alike or glibc or *BSD or newlib or <processor whitelist> */
+#elif (defined(_AIX) || defined(__hpux))
+
+/*
+ * Defines Big Endian on a whitelist of OSs that are known to be Big
+ * Endian-only. See
+ * https://public-inbox.org/git/93056823-2740-d072-1ebd-46b440b33d7e@felt.demon.nl/
+ */
+#define SHA1DC_BIGENDIAN
+
+/* Not under GCC-alike or glibc or *BSD or newlib or <processor whitelist> or <os whitelist> */
+#elif defined(SHA1DC_ON_INTEL_LIKE_PROCESSOR)
+/*
+ * As a last resort before we do anything else we're not 100% sure
+ * about below, we blacklist specific processors here. We could add
+ * more, see e.g. https://wiki.debian.org/ArchitectureSpecificsMemo
+ */
+#else /* Not under GCC-alike or glibc or *BSD or newlib or <processor whitelist> or <os whitelist> or <processor blacklist> */
+
+/* We do nothing more here for now */
+/*#error "Uncomment this to see if you fall through all the detection"*/
+
+#endif /* Big Endian detection */
+
+#if (defined(SHA1DC_FORCE_LITTLEENDIAN) && defined(SHA1DC_BIGENDIAN))
+#undef SHA1DC_BIGENDIAN
+#endif
+#if (defined(SHA1DC_FORCE_BIGENDIAN) && !defined(SHA1DC_BIGENDIAN))
+#define SHA1DC_BIGENDIAN
+#endif
+/*ENDIANNESS SELECTION*/
+
+#ifndef SHA1DC_FORCE_ALIGNED_ACCESS
+#if defined(SHA1DC_FORCE_UNALIGNED_ACCESS) || defined(SHA1DC_ON_INTEL_LIKE_PROCESSOR)
+#define SHA1DC_ALLOW_UNALIGNED_ACCESS
+#endif /*UNALIGNED ACCESS DETECTION*/
+#endif /*FORCE ALIGNED ACCESS*/
+
+#define rotate_right(x,n) (((x)>>(n))|((x)<<(32-(n))))
+#define rotate_left(x,n)  (((x)<<(n))|((x)>>(32-(n))))
+
+#define sha1_bswap32(x) \
+	{x = ((x << 8) & 0xFF00FF00) | ((x >> 8) & 0xFF00FF); x = (x << 16) | (x >> 16);}
+
+#define sha1_mix(W, t)  (rotate_left(W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16], 1))
+
+#ifdef SHA1DC_BIGENDIAN
+	#define sha1_load(m, t, temp)  { temp = m[t]; }
+#else
+	#define sha1_load(m, t, temp)  { temp = m[t]; sha1_bswap32(temp); }
+#endif
+
+#define sha1_store(W, t, x)	*(volatile uint32_t *)&W[t] = x
+
+#define sha1_f1(b,c,d) ((d)^((b)&((c)^(d))))
+#define sha1_f2(b,c,d) ((b)^(c)^(d))
+#define sha1_f3(b,c,d) (((b)&(c))+((d)&((b)^(c))))
+#define sha1_f4(b,c,d) ((b)^(c)^(d))
+
+#define HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, m, t) \
+	{ e += rotate_left(a, 5) + sha1_f1(b,c,d) + 0x5A827999 + m[t]; b = rotate_left(b, 30); }
+#define HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, m, t) \
+	{ e += rotate_left(a, 5) + sha1_f2(b,c,d) + 0x6ED9EBA1 + m[t]; b = rotate_left(b, 30); }
+#define HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, m, t) \
+	{ e += rotate_left(a, 5) + sha1_f3(b,c,d) + 0x8F1BBCDC + m[t]; b = rotate_left(b, 30); }
+#define HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, m, t) \
+	{ e += rotate_left(a, 5) + sha1_f4(b,c,d) + 0xCA62C1D6 + m[t]; b = rotate_left(b, 30); }
+
+#define HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, m, t) \
+	{ b = rotate_right(b, 30); e -= rotate_left(a, 5) + sha1_f1(b,c,d) + 0x5A827999 + m[t]; }
+#define HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, m, t) \
+	{ b = rotate_right(b, 30); e -= rotate_left(a, 5) + sha1_f2(b,c,d) + 0x6ED9EBA1 + m[t]; }
+#define HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, m, t) \
+	{ b = rotate_right(b, 30); e -= rotate_left(a, 5) + sha1_f3(b,c,d) + 0x8F1BBCDC + m[t]; }
+#define HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, m, t) \
+	{ b = rotate_right(b, 30); e -= rotate_left(a, 5) + sha1_f4(b,c,d) + 0xCA62C1D6 + m[t]; }
+
+#define SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(a, b, c, d, e, m, W, t, temp) \
+	{sha1_load(m, t, temp); sha1_store(W, t, temp); e += temp + rotate_left(a, 5) + sha1_f1(b,c,d) + 0x5A827999; b = rotate_left(b, 30);}
+
+#define SHA1COMPRESS_FULL_ROUND1_STEP_EXPAND(a, b, c, d, e, W, t, temp) \
+	{temp = sha1_mix(W, t); sha1_store(W, t, temp); e += temp + rotate_left(a, 5) + sha1_f1(b,c,d) + 0x5A827999; b = rotate_left(b, 30); }
+
+#define SHA1COMPRESS_FULL_ROUND2_STEP(a, b, c, d, e, W, t, temp) \
+	{temp = sha1_mix(W, t); sha1_store(W, t, temp); e += temp + rotate_left(a, 5) + sha1_f2(b,c,d) + 0x6ED9EBA1; b = rotate_left(b, 30); }
+
+#define SHA1COMPRESS_FULL_ROUND3_STEP(a, b, c, d, e, W, t, temp) \
+	{temp = sha1_mix(W, t); sha1_store(W, t, temp); e += temp + rotate_left(a, 5) + sha1_f3(b,c,d) + 0x8F1BBCDC; b = rotate_left(b, 30); }
+
+#define SHA1COMPRESS_FULL_ROUND4_STEP(a, b, c, d, e, W, t, temp) \
+	{temp = sha1_mix(W, t); sha1_store(W, t, temp); e += temp + rotate_left(a, 5) + sha1_f4(b,c,d) + 0xCA62C1D6; b = rotate_left(b, 30); }
+
+
+#define SHA1_STORE_STATE(i) states[i][0] = a; states[i][1] = b; states[i][2] = c; states[i][3] = d; states[i][4] = e;
+
+#ifdef BUILDNOCOLLDETECTSHA1COMPRESSION
+void sha1_compression(uint32_t ihv[5], const uint32_t m[16])
+{
+	uint32_t W[80];
+	uint32_t a,b,c,d,e;
+	unsigned i;
+
+	memcpy(W, m, 16 * 4);
+	for (i = 16; i < 80; ++i)
+		W[i] = sha1_mix(W, i);
+
+	a = ihv[0]; b = ihv[1]; c = ihv[2]; d = ihv[3]; e = ihv[4];
+
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 0);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 1);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 2);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 3);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 4);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 5);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 6);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 7);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 8);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 9);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 10);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 11);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 12);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 13);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 14);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 15);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 16);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 17);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 18);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 19);
+
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 20);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 21);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 22);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 23);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 24);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 25);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 26);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 27);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 28);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 29);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 30);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 31);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 32);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 33);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 34);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 35);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 36);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 37);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 38);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 39);
+
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 40);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 41);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 42);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 43);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 44);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 45);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 46);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 47);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 48);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 49);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 50);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 51);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 52);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 53);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 54);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 55);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 56);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 57);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 58);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 59);
+
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 60);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 61);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 62);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 63);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 64);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 65);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 66);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 67);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 68);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 69);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 70);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 71);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 72);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 73);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 74);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 75);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 76);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 77);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 78);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 79);
+
+	ihv[0] += a; ihv[1] += b; ihv[2] += c; ihv[3] += d; ihv[4] += e;
+}
+#endif /*BUILDNOCOLLDETECTSHA1COMPRESSION*/
+
+
+static void sha1_compression_W(uint32_t ihv[5], const uint32_t W[80])
+{
+	uint32_t a = ihv[0], b = ihv[1], c = ihv[2], d = ihv[3], e = ihv[4];
+
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 0);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 1);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 2);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 3);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 4);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 5);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 6);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 7);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 8);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 9);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 10);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 11);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 12);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 13);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 14);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, W, 15);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, W, 16);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, W, 17);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, W, 18);
+	HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, W, 19);
+
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 20);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 21);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 22);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 23);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 24);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 25);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 26);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 27);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 28);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 29);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 30);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 31);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 32);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 33);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 34);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, W, 35);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, W, 36);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, W, 37);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, W, 38);
+	HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, W, 39);
+
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 40);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 41);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 42);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 43);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 44);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 45);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 46);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 47);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 48);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 49);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 50);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 51);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 52);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 53);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 54);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, W, 55);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, W, 56);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, W, 57);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, W, 58);
+	HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, W, 59);
+
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 60);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 61);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 62);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 63);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 64);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 65);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 66);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 67);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 68);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 69);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 70);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 71);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 72);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 73);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 74);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, W, 75);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, W, 76);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, W, 77);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, W, 78);
+	HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, W, 79);
+
+	ihv[0] += a; ihv[1] += b; ihv[2] += c; ihv[3] += d; ihv[4] += e;
+}
+
+
+
+void sha1_compression_states(uint32_t ihv[5], const uint32_t m[16], uint32_t W[80], uint32_t states[80][5])
+{
+	uint32_t a = ihv[0], b = ihv[1], c = ihv[2], d = ihv[3], e = ihv[4];
+	uint32_t temp;
+
+#ifdef DOSTORESTATE00
+	SHA1_STORE_STATE(0)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(a, b, c, d, e, m, W, 0, temp);
+
+#ifdef DOSTORESTATE01
+	SHA1_STORE_STATE(1)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(e, a, b, c, d, m, W, 1, temp);
+
+#ifdef DOSTORESTATE02
+	SHA1_STORE_STATE(2)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(d, e, a, b, c, m, W, 2, temp);
+
+#ifdef DOSTORESTATE03
+	SHA1_STORE_STATE(3)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(c, d, e, a, b, m, W, 3, temp);
+
+#ifdef DOSTORESTATE04
+	SHA1_STORE_STATE(4)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(b, c, d, e, a, m, W, 4, temp);
+
+#ifdef DOSTORESTATE05
+	SHA1_STORE_STATE(5)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(a, b, c, d, e, m, W, 5, temp);
+
+#ifdef DOSTORESTATE06
+	SHA1_STORE_STATE(6)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(e, a, b, c, d, m, W, 6, temp);
+
+#ifdef DOSTORESTATE07
+	SHA1_STORE_STATE(7)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(d, e, a, b, c, m, W, 7, temp);
+
+#ifdef DOSTORESTATE08
+	SHA1_STORE_STATE(8)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(c, d, e, a, b, m, W, 8, temp);
+
+#ifdef DOSTORESTATE09
+	SHA1_STORE_STATE(9)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(b, c, d, e, a, m, W, 9, temp);
+
+#ifdef DOSTORESTATE10
+	SHA1_STORE_STATE(10)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(a, b, c, d, e, m, W, 10, temp);
+
+#ifdef DOSTORESTATE11
+	SHA1_STORE_STATE(11)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(e, a, b, c, d, m, W, 11, temp);
+
+#ifdef DOSTORESTATE12
+	SHA1_STORE_STATE(12)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(d, e, a, b, c, m, W, 12, temp);
+
+#ifdef DOSTORESTATE13
+	SHA1_STORE_STATE(13)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(c, d, e, a, b, m, W, 13, temp);
+
+#ifdef DOSTORESTATE14
+	SHA1_STORE_STATE(14)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(b, c, d, e, a, m, W, 14, temp);
+
+#ifdef DOSTORESTATE15
+	SHA1_STORE_STATE(15)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_LOAD(a, b, c, d, e, m, W, 15, temp);
+
+#ifdef DOSTORESTATE16
+	SHA1_STORE_STATE(16)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_EXPAND(e, a, b, c, d, W, 16, temp);
+
+#ifdef DOSTORESTATE17
+	SHA1_STORE_STATE(17)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_EXPAND(d, e, a, b, c, W, 17, temp);
+
+#ifdef DOSTORESTATE18
+	SHA1_STORE_STATE(18)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_EXPAND(c, d, e, a, b, W, 18, temp);
+
+#ifdef DOSTORESTATE19
+	SHA1_STORE_STATE(19)
+#endif
+	SHA1COMPRESS_FULL_ROUND1_STEP_EXPAND(b, c, d, e, a, W, 19, temp);
+
+
+
+#ifdef DOSTORESTATE20
+	SHA1_STORE_STATE(20)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(a, b, c, d, e, W, 20, temp);
+
+#ifdef DOSTORESTATE21
+	SHA1_STORE_STATE(21)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(e, a, b, c, d, W, 21, temp);
+
+#ifdef DOSTORESTATE22
+	SHA1_STORE_STATE(22)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(d, e, a, b, c, W, 22, temp);
+
+#ifdef DOSTORESTATE23
+	SHA1_STORE_STATE(23)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(c, d, e, a, b, W, 23, temp);
+
+#ifdef DOSTORESTATE24
+	SHA1_STORE_STATE(24)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(b, c, d, e, a, W, 24, temp);
+
+#ifdef DOSTORESTATE25
+	SHA1_STORE_STATE(25)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(a, b, c, d, e, W, 25, temp);
+
+#ifdef DOSTORESTATE26
+	SHA1_STORE_STATE(26)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(e, a, b, c, d, W, 26, temp);
+
+#ifdef DOSTORESTATE27
+	SHA1_STORE_STATE(27)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(d, e, a, b, c, W, 27, temp);
+
+#ifdef DOSTORESTATE28
+	SHA1_STORE_STATE(28)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(c, d, e, a, b, W, 28, temp);
+
+#ifdef DOSTORESTATE29
+	SHA1_STORE_STATE(29)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(b, c, d, e, a, W, 29, temp);
+
+#ifdef DOSTORESTATE30
+	SHA1_STORE_STATE(30)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(a, b, c, d, e, W, 30, temp);
+
+#ifdef DOSTORESTATE31
+	SHA1_STORE_STATE(31)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(e, a, b, c, d, W, 31, temp);
+
+#ifdef DOSTORESTATE32
+	SHA1_STORE_STATE(32)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(d, e, a, b, c, W, 32, temp);
+
+#ifdef DOSTORESTATE33
+	SHA1_STORE_STATE(33)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(c, d, e, a, b, W, 33, temp);
+
+#ifdef DOSTORESTATE34
+	SHA1_STORE_STATE(34)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(b, c, d, e, a, W, 34, temp);
+
+#ifdef DOSTORESTATE35
+	SHA1_STORE_STATE(35)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(a, b, c, d, e, W, 35, temp);
+
+#ifdef DOSTORESTATE36
+	SHA1_STORE_STATE(36)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(e, a, b, c, d, W, 36, temp);
+
+#ifdef DOSTORESTATE37
+	SHA1_STORE_STATE(37)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(d, e, a, b, c, W, 37, temp);
+
+#ifdef DOSTORESTATE38
+	SHA1_STORE_STATE(38)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(c, d, e, a, b, W, 38, temp);
+
+#ifdef DOSTORESTATE39
+	SHA1_STORE_STATE(39)
+#endif
+	SHA1COMPRESS_FULL_ROUND2_STEP(b, c, d, e, a, W, 39, temp);
+
+
+
+#ifdef DOSTORESTATE40
+	SHA1_STORE_STATE(40)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(a, b, c, d, e, W, 40, temp);
+
+#ifdef DOSTORESTATE41
+	SHA1_STORE_STATE(41)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(e, a, b, c, d, W, 41, temp);
+
+#ifdef DOSTORESTATE42
+	SHA1_STORE_STATE(42)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(d, e, a, b, c, W, 42, temp);
+
+#ifdef DOSTORESTATE43
+	SHA1_STORE_STATE(43)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(c, d, e, a, b, W, 43, temp);
+
+#ifdef DOSTORESTATE44
+	SHA1_STORE_STATE(44)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(b, c, d, e, a, W, 44, temp);
+
+#ifdef DOSTORESTATE45
+	SHA1_STORE_STATE(45)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(a, b, c, d, e, W, 45, temp);
+
+#ifdef DOSTORESTATE46
+	SHA1_STORE_STATE(46)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(e, a, b, c, d, W, 46, temp);
+
+#ifdef DOSTORESTATE47
+	SHA1_STORE_STATE(47)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(d, e, a, b, c, W, 47, temp);
+
+#ifdef DOSTORESTATE48
+	SHA1_STORE_STATE(48)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(c, d, e, a, b, W, 48, temp);
+
+#ifdef DOSTORESTATE49
+	SHA1_STORE_STATE(49)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(b, c, d, e, a, W, 49, temp);
+
+#ifdef DOSTORESTATE50
+	SHA1_STORE_STATE(50)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(a, b, c, d, e, W, 50, temp);
+
+#ifdef DOSTORESTATE51
+	SHA1_STORE_STATE(51)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(e, a, b, c, d, W, 51, temp);
+
+#ifdef DOSTORESTATE52
+	SHA1_STORE_STATE(52)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(d, e, a, b, c, W, 52, temp);
+
+#ifdef DOSTORESTATE53
+	SHA1_STORE_STATE(53)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(c, d, e, a, b, W, 53, temp);
+
+#ifdef DOSTORESTATE54
+	SHA1_STORE_STATE(54)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(b, c, d, e, a, W, 54, temp);
+
+#ifdef DOSTORESTATE55
+	SHA1_STORE_STATE(55)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(a, b, c, d, e, W, 55, temp);
+
+#ifdef DOSTORESTATE56
+	SHA1_STORE_STATE(56)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(e, a, b, c, d, W, 56, temp);
+
+#ifdef DOSTORESTATE57
+	SHA1_STORE_STATE(57)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(d, e, a, b, c, W, 57, temp);
+
+#ifdef DOSTORESTATE58
+	SHA1_STORE_STATE(58)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(c, d, e, a, b, W, 58, temp);
+
+#ifdef DOSTORESTATE59
+	SHA1_STORE_STATE(59)
+#endif
+	SHA1COMPRESS_FULL_ROUND3_STEP(b, c, d, e, a, W, 59, temp);
+
+
+
+
+#ifdef DOSTORESTATE60
+	SHA1_STORE_STATE(60)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(a, b, c, d, e, W, 60, temp);
+
+#ifdef DOSTORESTATE61
+	SHA1_STORE_STATE(61)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(e, a, b, c, d, W, 61, temp);
+
+#ifdef DOSTORESTATE62
+	SHA1_STORE_STATE(62)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(d, e, a, b, c, W, 62, temp);
+
+#ifdef DOSTORESTATE63
+	SHA1_STORE_STATE(63)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(c, d, e, a, b, W, 63, temp);
+
+#ifdef DOSTORESTATE64
+	SHA1_STORE_STATE(64)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(b, c, d, e, a, W, 64, temp);
+
+#ifdef DOSTORESTATE65
+	SHA1_STORE_STATE(65)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(a, b, c, d, e, W, 65, temp);
+
+#ifdef DOSTORESTATE66
+	SHA1_STORE_STATE(66)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(e, a, b, c, d, W, 66, temp);
+
+#ifdef DOSTORESTATE67
+	SHA1_STORE_STATE(67)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(d, e, a, b, c, W, 67, temp);
+
+#ifdef DOSTORESTATE68
+	SHA1_STORE_STATE(68)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(c, d, e, a, b, W, 68, temp);
+
+#ifdef DOSTORESTATE69
+	SHA1_STORE_STATE(69)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(b, c, d, e, a, W, 69, temp);
+
+#ifdef DOSTORESTATE70
+	SHA1_STORE_STATE(70)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(a, b, c, d, e, W, 70, temp);
+
+#ifdef DOSTORESTATE71
+	SHA1_STORE_STATE(71)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(e, a, b, c, d, W, 71, temp);
+
+#ifdef DOSTORESTATE72
+	SHA1_STORE_STATE(72)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(d, e, a, b, c, W, 72, temp);
+
+#ifdef DOSTORESTATE73
+	SHA1_STORE_STATE(73)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(c, d, e, a, b, W, 73, temp);
+
+#ifdef DOSTORESTATE74
+	SHA1_STORE_STATE(74)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(b, c, d, e, a, W, 74, temp);
+
+#ifdef DOSTORESTATE75
+	SHA1_STORE_STATE(75)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(a, b, c, d, e, W, 75, temp);
+
+#ifdef DOSTORESTATE76
+	SHA1_STORE_STATE(76)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(e, a, b, c, d, W, 76, temp);
+
+#ifdef DOSTORESTATE77
+	SHA1_STORE_STATE(77)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(d, e, a, b, c, W, 77, temp);
+
+#ifdef DOSTORESTATE78
+	SHA1_STORE_STATE(78)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(c, d, e, a, b, W, 78, temp);
+
+#ifdef DOSTORESTATE79
+	SHA1_STORE_STATE(79)
+#endif
+	SHA1COMPRESS_FULL_ROUND4_STEP(b, c, d, e, a, W, 79, temp);
+
+
+
+	ihv[0] += a; ihv[1] += b; ihv[2] += c; ihv[3] += d; ihv[4] += e;
+}
+
+
+
+
+#define SHA1_RECOMPRESS(t) \
+static void sha1recompress_fast_ ## t (uint32_t ihvin[5], uint32_t ihvout[5], const uint32_t me2[80], const uint32_t state[5]) \
+{ \
+	uint32_t a = state[0], b = state[1], c = state[2], d = state[3], e = state[4]; \
+	if (t > 79) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(b, c, d, e, a, me2, 79); \
+	if (t > 78) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(c, d, e, a, b, me2, 78); \
+	if (t > 77) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(d, e, a, b, c, me2, 77); \
+	if (t > 76) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(e, a, b, c, d, me2, 76); \
+	if (t > 75) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, me2, 75); \
+	if (t > 74) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(b, c, d, e, a, me2, 74); \
+	if (t > 73) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(c, d, e, a, b, me2, 73); \
+	if (t > 72) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(d, e, a, b, c, me2, 72); \
+	if (t > 71) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(e, a, b, c, d, me2, 71); \
+	if (t > 70) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, me2, 70); \
+	if (t > 69) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(b, c, d, e, a, me2, 69); \
+	if (t > 68) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(c, d, e, a, b, me2, 68); \
+	if (t > 67) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(d, e, a, b, c, me2, 67); \
+	if (t > 66) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(e, a, b, c, d, me2, 66); \
+	if (t > 65) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, me2, 65); \
+	if (t > 64) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(b, c, d, e, a, me2, 64); \
+	if (t > 63) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(c, d, e, a, b, me2, 63); \
+	if (t > 62) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(d, e, a, b, c, me2, 62); \
+	if (t > 61) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(e, a, b, c, d, me2, 61); \
+	if (t > 60) HASHCLASH_SHA1COMPRESS_ROUND4_STEP_BW(a, b, c, d, e, me2, 60); \
+	if (t > 59) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(b, c, d, e, a, me2, 59); \
+	if (t > 58) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(c, d, e, a, b, me2, 58); \
+	if (t > 57) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(d, e, a, b, c, me2, 57); \
+	if (t > 56) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(e, a, b, c, d, me2, 56); \
+	if (t > 55) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, me2, 55); \
+	if (t > 54) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(b, c, d, e, a, me2, 54); \
+	if (t > 53) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(c, d, e, a, b, me2, 53); \
+	if (t > 52) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(d, e, a, b, c, me2, 52); \
+	if (t > 51) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(e, a, b, c, d, me2, 51); \
+	if (t > 50) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, me2, 50); \
+	if (t > 49) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(b, c, d, e, a, me2, 49); \
+	if (t > 48) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(c, d, e, a, b, me2, 48); \
+	if (t > 47) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(d, e, a, b, c, me2, 47); \
+	if (t > 46) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(e, a, b, c, d, me2, 46); \
+	if (t > 45) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, me2, 45); \
+	if (t > 44) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(b, c, d, e, a, me2, 44); \
+	if (t > 43) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(c, d, e, a, b, me2, 43); \
+	if (t > 42) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(d, e, a, b, c, me2, 42); \
+	if (t > 41) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(e, a, b, c, d, me2, 41); \
+	if (t > 40) HASHCLASH_SHA1COMPRESS_ROUND3_STEP_BW(a, b, c, d, e, me2, 40); \
+	if (t > 39) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(b, c, d, e, a, me2, 39); \
+	if (t > 38) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(c, d, e, a, b, me2, 38); \
+	if (t > 37) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(d, e, a, b, c, me2, 37); \
+	if (t > 36) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(e, a, b, c, d, me2, 36); \
+	if (t > 35) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, me2, 35); \
+	if (t > 34) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(b, c, d, e, a, me2, 34); \
+	if (t > 33) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(c, d, e, a, b, me2, 33); \
+	if (t > 32) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(d, e, a, b, c, me2, 32); \
+	if (t > 31) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(e, a, b, c, d, me2, 31); \
+	if (t > 30) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, me2, 30); \
+	if (t > 29) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(b, c, d, e, a, me2, 29); \
+	if (t > 28) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(c, d, e, a, b, me2, 28); \
+	if (t > 27) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(d, e, a, b, c, me2, 27); \
+	if (t > 26) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(e, a, b, c, d, me2, 26); \
+	if (t > 25) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, me2, 25); \
+	if (t > 24) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(b, c, d, e, a, me2, 24); \
+	if (t > 23) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(c, d, e, a, b, me2, 23); \
+	if (t > 22) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(d, e, a, b, c, me2, 22); \
+	if (t > 21) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(e, a, b, c, d, me2, 21); \
+	if (t > 20) HASHCLASH_SHA1COMPRESS_ROUND2_STEP_BW(a, b, c, d, e, me2, 20); \
+	if (t > 19) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(b, c, d, e, a, me2, 19); \
+	if (t > 18) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(c, d, e, a, b, me2, 18); \
+	if (t > 17) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(d, e, a, b, c, me2, 17); \
+	if (t > 16) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(e, a, b, c, d, me2, 16); \
+	if (t > 15) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, me2, 15); \
+	if (t > 14) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(b, c, d, e, a, me2, 14); \
+	if (t > 13) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(c, d, e, a, b, me2, 13); \
+	if (t > 12) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(d, e, a, b, c, me2, 12); \
+	if (t > 11) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(e, a, b, c, d, me2, 11); \
+	if (t > 10) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, me2, 10); \
+	if (t > 9) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(b, c, d, e, a, me2, 9); \
+	if (t > 8) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(c, d, e, a, b, me2, 8); \
+	if (t > 7) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(d, e, a, b, c, me2, 7); \
+	if (t > 6) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(e, a, b, c, d, me2, 6); \
+	if (t > 5) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, me2, 5); \
+	if (t > 4) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(b, c, d, e, a, me2, 4); \
+	if (t > 3) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(c, d, e, a, b, me2, 3); \
+	if (t > 2) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(d, e, a, b, c, me2, 2); \
+	if (t > 1) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(e, a, b, c, d, me2, 1); \
+	if (t > 0) HASHCLASH_SHA1COMPRESS_ROUND1_STEP_BW(a, b, c, d, e, me2, 0); \
+	ihvin[0] = a; ihvin[1] = b; ihvin[2] = c; ihvin[3] = d; ihvin[4] = e; \
+	a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; \
+	if (t <= 0) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, me2, 0); \
+	if (t <= 1) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, me2, 1); \
+	if (t <= 2) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, me2, 2); \
+	if (t <= 3) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, me2, 3); \
+	if (t <= 4) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, me2, 4); \
+	if (t <= 5) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, me2, 5); \
+	if (t <= 6) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, me2, 6); \
+	if (t <= 7) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, me2, 7); \
+	if (t <= 8) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, me2, 8); \
+	if (t <= 9) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, me2, 9); \
+	if (t <= 10) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, me2, 10); \
+	if (t <= 11) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, me2, 11); \
+	if (t <= 12) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, me2, 12); \
+	if (t <= 13) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, me2, 13); \
+	if (t <= 14) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, me2, 14); \
+	if (t <= 15) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(a, b, c, d, e, me2, 15); \
+	if (t <= 16) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(e, a, b, c, d, me2, 16); \
+	if (t <= 17) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(d, e, a, b, c, me2, 17); \
+	if (t <= 18) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(c, d, e, a, b, me2, 18); \
+	if (t <= 19) HASHCLASH_SHA1COMPRESS_ROUND1_STEP(b, c, d, e, a, me2, 19); \
+	if (t <= 20) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, me2, 20); \
+	if (t <= 21) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, me2, 21); \
+	if (t <= 22) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, me2, 22); \
+	if (t <= 23) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, me2, 23); \
+	if (t <= 24) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, me2, 24); \
+	if (t <= 25) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, me2, 25); \
+	if (t <= 26) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, me2, 26); \
+	if (t <= 27) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, me2, 27); \
+	if (t <= 28) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, me2, 28); \
+	if (t <= 29) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, me2, 29); \
+	if (t <= 30) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, me2, 30); \
+	if (t <= 31) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, me2, 31); \
+	if (t <= 32) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, me2, 32); \
+	if (t <= 33) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, me2, 33); \
+	if (t <= 34) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, me2, 34); \
+	if (t <= 35) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(a, b, c, d, e, me2, 35); \
+	if (t <= 36) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(e, a, b, c, d, me2, 36); \
+	if (t <= 37) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(d, e, a, b, c, me2, 37); \
+	if (t <= 38) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(c, d, e, a, b, me2, 38); \
+	if (t <= 39) HASHCLASH_SHA1COMPRESS_ROUND2_STEP(b, c, d, e, a, me2, 39); \
+	if (t <= 40) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, me2, 40); \
+	if (t <= 41) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, me2, 41); \
+	if (t <= 42) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, me2, 42); \
+	if (t <= 43) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, me2, 43); \
+	if (t <= 44) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, me2, 44); \
+	if (t <= 45) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, me2, 45); \
+	if (t <= 46) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, me2, 46); \
+	if (t <= 47) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, me2, 47); \
+	if (t <= 48) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, me2, 48); \
+	if (t <= 49) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, me2, 49); \
+	if (t <= 50) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, me2, 50); \
+	if (t <= 51) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, me2, 51); \
+	if (t <= 52) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, me2, 52); \
+	if (t <= 53) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, me2, 53); \
+	if (t <= 54) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, me2, 54); \
+	if (t <= 55) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(a, b, c, d, e, me2, 55); \
+	if (t <= 56) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(e, a, b, c, d, me2, 56); \
+	if (t <= 57) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(d, e, a, b, c, me2, 57); \
+	if (t <= 58) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(c, d, e, a, b, me2, 58); \
+	if (t <= 59) HASHCLASH_SHA1COMPRESS_ROUND3_STEP(b, c, d, e, a, me2, 59); \
+	if (t <= 60) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, me2, 60); \
+	if (t <= 61) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, me2, 61); \
+	if (t <= 62) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, me2, 62); \
+	if (t <= 63) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, me2, 63); \
+	if (t <= 64) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, me2, 64); \
+	if (t <= 65) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, me2, 65); \
+	if (t <= 66) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, me2, 66); \
+	if (t <= 67) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, me2, 67); \
+	if (t <= 68) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, me2, 68); \
+	if (t <= 69) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, me2, 69); \
+	if (t <= 70) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, me2, 70); \
+	if (t <= 71) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, me2, 71); \
+	if (t <= 72) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, me2, 72); \
+	if (t <= 73) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, me2, 73); \
+	if (t <= 74) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, me2, 74); \
+	if (t <= 75) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(a, b, c, d, e, me2, 75); \
+	if (t <= 76) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(e, a, b, c, d, me2, 76); \
+	if (t <= 77) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(d, e, a, b, c, me2, 77); \
+	if (t <= 78) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(c, d, e, a, b, me2, 78); \
+	if (t <= 79) HASHCLASH_SHA1COMPRESS_ROUND4_STEP(b, c, d, e, a, me2, 79); \
+	ihvout[0] = ihvin[0] + a; ihvout[1] = ihvin[1] + b; ihvout[2] = ihvin[2] + c; ihvout[3] = ihvin[3] + d; ihvout[4] = ihvin[4] + e; \
+}
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4127)  /* Compiler complains about the checks in the above macro being constant. */
+#endif
+
+#ifdef DOSTORESTATE0
+SHA1_RECOMPRESS(0)
+#endif
+
+#ifdef DOSTORESTATE1
+SHA1_RECOMPRESS(1)
+#endif
+
+#ifdef DOSTORESTATE2
+SHA1_RECOMPRESS(2)
+#endif
+
+#ifdef DOSTORESTATE3
+SHA1_RECOMPRESS(3)
+#endif
+
+#ifdef DOSTORESTATE4
+SHA1_RECOMPRESS(4)
+#endif
+
+#ifdef DOSTORESTATE5
+SHA1_RECOMPRESS(5)
+#endif
+
+#ifdef DOSTORESTATE6
+SHA1_RECOMPRESS(6)
+#endif
+
+#ifdef DOSTORESTATE7
+SHA1_RECOMPRESS(7)
+#endif
+
+#ifdef DOSTORESTATE8
+SHA1_RECOMPRESS(8)
+#endif
+
+#ifdef DOSTORESTATE9
+SHA1_RECOMPRESS(9)
+#endif
+
+#ifdef DOSTORESTATE10
+SHA1_RECOMPRESS(10)
+#endif
+
+#ifdef DOSTORESTATE11
+SHA1_RECOMPRESS(11)
+#endif
+
+#ifdef DOSTORESTATE12
+SHA1_RECOMPRESS(12)
+#endif
+
+#ifdef DOSTORESTATE13
+SHA1_RECOMPRESS(13)
+#endif
+
+#ifdef DOSTORESTATE14
+SHA1_RECOMPRESS(14)
+#endif
+
+#ifdef DOSTORESTATE15
+SHA1_RECOMPRESS(15)
+#endif
+
+#ifdef DOSTORESTATE16
+SHA1_RECOMPRESS(16)
+#endif
+
+#ifdef DOSTORESTATE17
+SHA1_RECOMPRESS(17)
+#endif
+
+#ifdef DOSTORESTATE18
+SHA1_RECOMPRESS(18)
+#endif
+
+#ifdef DOSTORESTATE19
+SHA1_RECOMPRESS(19)
+#endif
+
+#ifdef DOSTORESTATE20
+SHA1_RECOMPRESS(20)
+#endif
+
+#ifdef DOSTORESTATE21
+SHA1_RECOMPRESS(21)
+#endif
+
+#ifdef DOSTORESTATE22
+SHA1_RECOMPRESS(22)
+#endif
+
+#ifdef DOSTORESTATE23
+SHA1_RECOMPRESS(23)
+#endif
+
+#ifdef DOSTORESTATE24
+SHA1_RECOMPRESS(24)
+#endif
+
+#ifdef DOSTORESTATE25
+SHA1_RECOMPRESS(25)
+#endif
+
+#ifdef DOSTORESTATE26
+SHA1_RECOMPRESS(26)
+#endif
+
+#ifdef DOSTORESTATE27
+SHA1_RECOMPRESS(27)
+#endif
+
+#ifdef DOSTORESTATE28
+SHA1_RECOMPRESS(28)
+#endif
+
+#ifdef DOSTORESTATE29
+SHA1_RECOMPRESS(29)
+#endif
+
+#ifdef DOSTORESTATE30
+SHA1_RECOMPRESS(30)
+#endif
+
+#ifdef DOSTORESTATE31
+SHA1_RECOMPRESS(31)
+#endif
+
+#ifdef DOSTORESTATE32
+SHA1_RECOMPRESS(32)
+#endif
+
+#ifdef DOSTORESTATE33
+SHA1_RECOMPRESS(33)
+#endif
+
+#ifdef DOSTORESTATE34
+SHA1_RECOMPRESS(34)
+#endif
+
+#ifdef DOSTORESTATE35
+SHA1_RECOMPRESS(35)
+#endif
+
+#ifdef DOSTORESTATE36
+SHA1_RECOMPRESS(36)
+#endif
+
+#ifdef DOSTORESTATE37
+SHA1_RECOMPRESS(37)
+#endif
+
+#ifdef DOSTORESTATE38
+SHA1_RECOMPRESS(38)
+#endif
+
+#ifdef DOSTORESTATE39
+SHA1_RECOMPRESS(39)
+#endif
+
+#ifdef DOSTORESTATE40
+SHA1_RECOMPRESS(40)
+#endif
+
+#ifdef DOSTORESTATE41
+SHA1_RECOMPRESS(41)
+#endif
+
+#ifdef DOSTORESTATE42
+SHA1_RECOMPRESS(42)
+#endif
+
+#ifdef DOSTORESTATE43
+SHA1_RECOMPRESS(43)
+#endif
+
+#ifdef DOSTORESTATE44
+SHA1_RECOMPRESS(44)
+#endif
+
+#ifdef DOSTORESTATE45
+SHA1_RECOMPRESS(45)
+#endif
+
+#ifdef DOSTORESTATE46
+SHA1_RECOMPRESS(46)
+#endif
+
+#ifdef DOSTORESTATE47
+SHA1_RECOMPRESS(47)
+#endif
+
+#ifdef DOSTORESTATE48
+SHA1_RECOMPRESS(48)
+#endif
+
+#ifdef DOSTORESTATE49
+SHA1_RECOMPRESS(49)
+#endif
+
+#ifdef DOSTORESTATE50
+SHA1_RECOMPRESS(50)
+#endif
+
+#ifdef DOSTORESTATE51
+SHA1_RECOMPRESS(51)
+#endif
+
+#ifdef DOSTORESTATE52
+SHA1_RECOMPRESS(52)
+#endif
+
+#ifdef DOSTORESTATE53
+SHA1_RECOMPRESS(53)
+#endif
+
+#ifdef DOSTORESTATE54
+SHA1_RECOMPRESS(54)
+#endif
+
+#ifdef DOSTORESTATE55
+SHA1_RECOMPRESS(55)
+#endif
+
+#ifdef DOSTORESTATE56
+SHA1_RECOMPRESS(56)
+#endif
+
+#ifdef DOSTORESTATE57
+SHA1_RECOMPRESS(57)
+#endif
+
+#ifdef DOSTORESTATE58
+SHA1_RECOMPRESS(58)
+#endif
+
+#ifdef DOSTORESTATE59
+SHA1_RECOMPRESS(59)
+#endif
+
+#ifdef DOSTORESTATE60
+SHA1_RECOMPRESS(60)
+#endif
+
+#ifdef DOSTORESTATE61
+SHA1_RECOMPRESS(61)
+#endif
+
+#ifdef DOSTORESTATE62
+SHA1_RECOMPRESS(62)
+#endif
+
+#ifdef DOSTORESTATE63
+SHA1_RECOMPRESS(63)
+#endif
+
+#ifdef DOSTORESTATE64
+SHA1_RECOMPRESS(64)
+#endif
+
+#ifdef DOSTORESTATE65
+SHA1_RECOMPRESS(65)
+#endif
+
+#ifdef DOSTORESTATE66
+SHA1_RECOMPRESS(66)
+#endif
+
+#ifdef DOSTORESTATE67
+SHA1_RECOMPRESS(67)
+#endif
+
+#ifdef DOSTORESTATE68
+SHA1_RECOMPRESS(68)
+#endif
+
+#ifdef DOSTORESTATE69
+SHA1_RECOMPRESS(69)
+#endif
+
+#ifdef DOSTORESTATE70
+SHA1_RECOMPRESS(70)
+#endif
+
+#ifdef DOSTORESTATE71
+SHA1_RECOMPRESS(71)
+#endif
+
+#ifdef DOSTORESTATE72
+SHA1_RECOMPRESS(72)
+#endif
+
+#ifdef DOSTORESTATE73
+SHA1_RECOMPRESS(73)
+#endif
+
+#ifdef DOSTORESTATE74
+SHA1_RECOMPRESS(74)
+#endif
+
+#ifdef DOSTORESTATE75
+SHA1_RECOMPRESS(75)
+#endif
+
+#ifdef DOSTORESTATE76
+SHA1_RECOMPRESS(76)
+#endif
+
+#ifdef DOSTORESTATE77
+SHA1_RECOMPRESS(77)
+#endif
+
+#ifdef DOSTORESTATE78
+SHA1_RECOMPRESS(78)
+#endif
+
+#ifdef DOSTORESTATE79
+SHA1_RECOMPRESS(79)
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+static void sha1_recompression_step(uint32_t step, uint32_t ihvin[5], uint32_t ihvout[5], const uint32_t me2[80], const uint32_t state[5])
+{
+	switch (step)
+	{
+#ifdef DOSTORESTATE0
+	case 0:
+		sha1recompress_fast_0(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE1
+	case 1:
+		sha1recompress_fast_1(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE2
+	case 2:
+		sha1recompress_fast_2(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE3
+	case 3:
+		sha1recompress_fast_3(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE4
+	case 4:
+		sha1recompress_fast_4(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE5
+	case 5:
+		sha1recompress_fast_5(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE6
+	case 6:
+		sha1recompress_fast_6(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE7
+	case 7:
+		sha1recompress_fast_7(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE8
+	case 8:
+		sha1recompress_fast_8(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE9
+	case 9:
+		sha1recompress_fast_9(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE10
+	case 10:
+		sha1recompress_fast_10(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE11
+	case 11:
+		sha1recompress_fast_11(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE12
+	case 12:
+		sha1recompress_fast_12(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE13
+	case 13:
+		sha1recompress_fast_13(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE14
+	case 14:
+		sha1recompress_fast_14(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE15
+	case 15:
+		sha1recompress_fast_15(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE16
+	case 16:
+		sha1recompress_fast_16(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE17
+	case 17:
+		sha1recompress_fast_17(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE18
+	case 18:
+		sha1recompress_fast_18(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE19
+	case 19:
+		sha1recompress_fast_19(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE20
+	case 20:
+		sha1recompress_fast_20(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE21
+	case 21:
+		sha1recompress_fast_21(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE22
+	case 22:
+		sha1recompress_fast_22(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE23
+	case 23:
+		sha1recompress_fast_23(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE24
+	case 24:
+		sha1recompress_fast_24(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE25
+	case 25:
+		sha1recompress_fast_25(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE26
+	case 26:
+		sha1recompress_fast_26(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE27
+	case 27:
+		sha1recompress_fast_27(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE28
+	case 28:
+		sha1recompress_fast_28(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE29
+	case 29:
+		sha1recompress_fast_29(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE30
+	case 30:
+		sha1recompress_fast_30(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE31
+	case 31:
+		sha1recompress_fast_31(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE32
+	case 32:
+		sha1recompress_fast_32(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE33
+	case 33:
+		sha1recompress_fast_33(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE34
+	case 34:
+		sha1recompress_fast_34(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE35
+	case 35:
+		sha1recompress_fast_35(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE36
+	case 36:
+		sha1recompress_fast_36(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE37
+	case 37:
+		sha1recompress_fast_37(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE38
+	case 38:
+		sha1recompress_fast_38(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE39
+	case 39:
+		sha1recompress_fast_39(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE40
+	case 40:
+		sha1recompress_fast_40(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE41
+	case 41:
+		sha1recompress_fast_41(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE42
+	case 42:
+		sha1recompress_fast_42(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE43
+	case 43:
+		sha1recompress_fast_43(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE44
+	case 44:
+		sha1recompress_fast_44(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE45
+	case 45:
+		sha1recompress_fast_45(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE46
+	case 46:
+		sha1recompress_fast_46(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE47
+	case 47:
+		sha1recompress_fast_47(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE48
+	case 48:
+		sha1recompress_fast_48(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE49
+	case 49:
+		sha1recompress_fast_49(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE50
+	case 50:
+		sha1recompress_fast_50(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE51
+	case 51:
+		sha1recompress_fast_51(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE52
+	case 52:
+		sha1recompress_fast_52(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE53
+	case 53:
+		sha1recompress_fast_53(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE54
+	case 54:
+		sha1recompress_fast_54(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE55
+	case 55:
+		sha1recompress_fast_55(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE56
+	case 56:
+		sha1recompress_fast_56(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE57
+	case 57:
+		sha1recompress_fast_57(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE58
+	case 58:
+		sha1recompress_fast_58(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE59
+	case 59:
+		sha1recompress_fast_59(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE60
+	case 60:
+		sha1recompress_fast_60(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE61
+	case 61:
+		sha1recompress_fast_61(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE62
+	case 62:
+		sha1recompress_fast_62(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE63
+	case 63:
+		sha1recompress_fast_63(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE64
+	case 64:
+		sha1recompress_fast_64(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE65
+	case 65:
+		sha1recompress_fast_65(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE66
+	case 66:
+		sha1recompress_fast_66(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE67
+	case 67:
+		sha1recompress_fast_67(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE68
+	case 68:
+		sha1recompress_fast_68(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE69
+	case 69:
+		sha1recompress_fast_69(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE70
+	case 70:
+		sha1recompress_fast_70(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE71
+	case 71:
+		sha1recompress_fast_71(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE72
+	case 72:
+		sha1recompress_fast_72(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE73
+	case 73:
+		sha1recompress_fast_73(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE74
+	case 74:
+		sha1recompress_fast_74(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE75
+	case 75:
+		sha1recompress_fast_75(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE76
+	case 76:
+		sha1recompress_fast_76(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE77
+	case 77:
+		sha1recompress_fast_77(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE78
+	case 78:
+		sha1recompress_fast_78(ihvin, ihvout, me2, state);
+		break;
+#endif
+#ifdef DOSTORESTATE79
+	case 79:
+		sha1recompress_fast_79(ihvin, ihvout, me2, state);
+		break;
+#endif
+	default:
+		abort();
+	}
+
+}
+
+
+
+static void sha1_process(SHA1_CTX* ctx, const uint32_t block[16])
+{
+	unsigned i, j;
+	uint32_t ubc_dv_mask[DVMASKSIZE] = { 0xFFFFFFFF };
+	uint32_t ihvtmp[5];
+
+	ctx->ihv1[0] = ctx->ihv[0];
+	ctx->ihv1[1] = ctx->ihv[1];
+	ctx->ihv1[2] = ctx->ihv[2];
+	ctx->ihv1[3] = ctx->ihv[3];
+	ctx->ihv1[4] = ctx->ihv[4];
+
+	sha1_compression_states(ctx->ihv, block, ctx->m1, ctx->states);
+
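+	/*
+	 * Collision detection: ubc_check() cheaply evaluates the unavoidable
+	 * bitconditions for all known disturbance vectors (DVs) at once and
+	 * returns a bitmask; only the DVs whose bit survives in the mask need
+	 * the expensive recompression check below.
+	 */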
+	if (ctx->detect_coll)
+	{
+		if (ctx->ubc_check)
+		{
+			ubc_check(ctx->m1, ubc_dv_mask);
+		}
+
+		if (ubc_dv_mask[0] != 0)
+		{
+			for (i = 0; sha1_dvs[i].dvType != 0; ++i)
+			{
+				if (ubc_dv_mask[0] & ((uint32_t)(1) << sha1_dvs[i].maskb))
+				{
+					for (j = 0; j < 80; ++j)
+						ctx->m2[j] = ctx->m1[j] ^ sha1_dvs[i].dm[j];
+
+					sha1_recompression_step(sha1_dvs[i].testt, ctx->ihv2, ihvtmp, ctx->m2, ctx->states[sha1_dvs[i].testt]);
+
+					/* check for a full collision; the reduced_round_coll branch exists to verify this SHA-1 collision detection code against collisions for reduced-step SHA-1 */
+					if ((0 == ((ihvtmp[0] ^ ctx->ihv[0]) | (ihvtmp[1] ^ ctx->ihv[1]) | (ihvtmp[2] ^ ctx->ihv[2]) | (ihvtmp[3] ^ ctx->ihv[3]) | (ihvtmp[4] ^ ctx->ihv[4])))
+						|| (ctx->reduced_round_coll && 0==((ctx->ihv1[0] ^ ctx->ihv2[0]) | (ctx->ihv1[1] ^ ctx->ihv2[1]) | (ctx->ihv1[2] ^ ctx->ihv2[2]) | (ctx->ihv1[3] ^ ctx->ihv2[3]) | (ctx->ihv1[4] ^ ctx->ihv2[4]))))
+					{
+						ctx->found_collision = 1;
+
+						if (ctx->safe_hash)
+						{
+							sha1_compression_W(ctx->ihv, ctx->m1);
+							sha1_compression_W(ctx->ihv, ctx->m1);
+						}
+
+						break;
+					}
+				}
+			}
+		}
+	}
+}
+
+void SHA1DCInit(SHA1_CTX* ctx)
+{
+	ctx->total = 0;
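+	/* standard SHA-1 initial chaining value (FIPS 180-4) */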
+	ctx->ihv[0] = 0x67452301;
+	ctx->ihv[1] = 0xEFCDAB89;
+	ctx->ihv[2] = 0x98BADCFE;
+	ctx->ihv[3] = 0x10325476;
+	ctx->ihv[4] = 0xC3D2E1F0;
+	ctx->found_collision = 0;
+	ctx->safe_hash = SHA1DC_INIT_SAFE_HASH_DEFAULT;
+	ctx->ubc_check = 1;
+	ctx->detect_coll = 1;
+	ctx->reduced_round_coll = 0;
+	ctx->callback = NULL;
+}
+
+void SHA1DCSetSafeHash(SHA1_CTX* ctx, int safehash)
+{
+	if (safehash)
+		ctx->safe_hash = 1;
+	else
+		ctx->safe_hash = 0;
+}
+
+
+void SHA1DCSetUseUBC(SHA1_CTX* ctx, int ubc_check)
+{
+	if (ubc_check)
+		ctx->ubc_check = 1;
+	else
+		ctx->ubc_check = 0;
+}
+
+void SHA1DCSetUseDetectColl(SHA1_CTX* ctx, int detect_coll)
+{
+	if (detect_coll)
+		ctx->detect_coll = 1;
+	else
+		ctx->detect_coll = 0;
+}
+
+void SHA1DCSetDetectReducedRoundCollision(SHA1_CTX* ctx, int reduced_round_coll)
+{
+	if (reduced_round_coll)
+		ctx->reduced_round_coll = 1;
+	else
+		ctx->reduced_round_coll = 0;
+}
+
+void SHA1DCSetCallback(SHA1_CTX* ctx, collision_block_callback callback)
+{
+	ctx->callback = callback;
+}
+
+void SHA1DCUpdate(SHA1_CTX* ctx, const char* buf, size_t len)
+{
+	unsigned left, fill;
+
+	if (len == 0)
+		return;
+
+	left = ctx->total & 63;
+	fill = 64 - left;
+
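+	/* first, top up a partially filled internal buffer to a full 64-byte block */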
+	if (left && len >= fill)
+	{
+		ctx->total += fill;
+		memcpy(ctx->buffer + left, buf, fill);
+		sha1_process(ctx, (uint32_t*)(ctx->buffer));
+		buf += fill;
+		len -= fill;
+		left = 0;
+	}
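+	/* then process the remaining input one full 64-byte block at a time */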
+	while (len >= 64)
+	{
+		ctx->total += 64;
+
+#if defined(SHA1DC_ALLOW_UNALIGNED_ACCESS)
+		sha1_process(ctx, (uint32_t*)(buf));
+#else
+		memcpy(ctx->buffer, buf, 64);
+		sha1_process(ctx, (uint32_t*)(ctx->buffer));
+#endif /* defined(SHA1DC_ALLOW_UNALIGNED_ACCESS) */
+		buf += 64;
+		len -= 64;
+	}
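+	/* finally, buffer any trailing partial block for a later call */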
+	if (len > 0)
+	{
+		ctx->total += len;
+		memcpy(ctx->buffer + left, buf, len);
+	}
+}
+
+static const unsigned char sha1_padding[64] =
+{
+	0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+int SHA1DCFinal(unsigned char output[20], SHA1_CTX *ctx)
+{
+	uint32_t last = ctx->total & 63;
+	uint32_t padn = (last < 56) ? (56 - last) : (120 - last);
+	uint64_t total;
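+	/* pad with 0x80 followed by zeros up to 56 mod 64 bytes, leaving 8 bytes for the length field */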
+	SHA1DCUpdate(ctx, (const char*)(sha1_padding), padn);
+
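+	/* append the original message length in bits as a big-endian 64-bit value, then process the final block */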
+	total = ctx->total - padn;
+	total <<= 3;
+	ctx->buffer[56] = (unsigned char)(total >> 56);
+	ctx->buffer[57] = (unsigned char)(total >> 48);
+	ctx->buffer[58] = (unsigned char)(total >> 40);
+	ctx->buffer[59] = (unsigned char)(total >> 32);
+	ctx->buffer[60] = (unsigned char)(total >> 24);
+	ctx->buffer[61] = (unsigned char)(total >> 16);
+	ctx->buffer[62] = (unsigned char)(total >> 8);
+	ctx->buffer[63] = (unsigned char)(total);
+	sha1_process(ctx, (uint32_t*)(ctx->buffer));
+	output[0] = (unsigned char)(ctx->ihv[0] >> 24);
+	output[1] = (unsigned char)(ctx->ihv[0] >> 16);
+	output[2] = (unsigned char)(ctx->ihv[0] >> 8);
+	output[3] = (unsigned char)(ctx->ihv[0]);
+	output[4] = (unsigned char)(ctx->ihv[1] >> 24);
+	output[5] = (unsigned char)(ctx->ihv[1] >> 16);
+	output[6] = (unsigned char)(ctx->ihv[1] >> 8);
+	output[7] = (unsigned char)(ctx->ihv[1]);
+	output[8] = (unsigned char)(ctx->ihv[2] >> 24);
+	output[9] = (unsigned char)(ctx->ihv[2] >> 16);
+	output[10] = (unsigned char)(ctx->ihv[2] >> 8);
+	output[11] = (unsigned char)(ctx->ihv[2]);
+	output[12] = (unsigned char)(ctx->ihv[3] >> 24);
+	output[13] = (unsigned char)(ctx->ihv[3] >> 16);
+	output[14] = (unsigned char)(ctx->ihv[3] >> 8);
+	output[15] = (unsigned char)(ctx->ihv[3]);
+	output[16] = (unsigned char)(ctx->ihv[4] >> 24);
+	output[17] = (unsigned char)(ctx->ihv[4] >> 16);
+	output[18] = (unsigned char)(ctx->ihv[4] >> 8);
+	output[19] = (unsigned char)(ctx->ihv[4]);
+	return ctx->found_collision;
+}
+
+#ifdef SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_C
+#include SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_C
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/sha1dc/lib/sha1.h	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,117 @@
+/***
+* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
+* Distributed under the MIT Software License.
+* See accompanying file LICENSE.txt or copy at
+* https://opensource.org/licenses/MIT
+***/
+
+#ifndef SHA1DC_SHA1_H
+#define SHA1DC_SHA1_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef SHA1DC_NO_STANDARD_INCLUDES
+/* PY27: this fallback can be changed to a straight #include once Python 2.7
+   is dropped, since it exists only to support MSVC 2008. */
+#if !defined(_MSC_VER) || _MSC_VER >= 1600
+#include <stdint.h>
+#else
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+#endif
+#endif
+
+/* SHA-1 compression function that takes an already expanded message and additionally stores intermediate states */
+/* it only stores state ii (the state between step ii-1 and step ii) when DOSTORESTATEii is defined in ubc_check.h */
+void sha1_compression_states(uint32_t[5], const uint32_t[16], uint32_t[80], uint32_t[80][5]);
+
+/*
+// Function type for sha1_recompression_step_T (uint32_t ihvin[5], uint32_t ihvout[5], const uint32_t me2[80], const uint32_t state[5]).
+// Where 0 <= T < 80
+//       me2 is an expanded message (the expansion of an original message block XOR'ed with a disturbance vector's message block difference).
+//       state is the internal state (a,b,c,d,e) before step T of the SHA-1 compression function while processing the original message block.
+// The function will return:
+//       ihvin: The reconstructed input chaining value.
+//       ihvout: The reconstructed output chaining value.
+*/
+typedef void(*sha1_recompression_type)(uint32_t*, uint32_t*, const uint32_t*, const uint32_t*);
+
+/* A callback function type that can be set to be called when a collision block has been found: */
+/* void collision_block_callback(uint64_t byteoffset, const uint32_t ihvin1[5], const uint32_t ihvin2[5], const uint32_t m1[80], const uint32_t m2[80]) */
+typedef void(*collision_block_callback)(uint64_t, const uint32_t*, const uint32_t*, const uint32_t*, const uint32_t*);
+
+/* The SHA-1 context. */
+typedef struct {
+	uint64_t total;
+	uint32_t ihv[5];
+	unsigned char buffer[64];
+	int found_collision;
+	int safe_hash;
+	int detect_coll;
+	int ubc_check;
+	int reduced_round_coll;
+	collision_block_callback callback;
+
+	uint32_t ihv1[5];
+	uint32_t ihv2[5];
+	uint32_t m1[80];
+	uint32_t m2[80];
+	uint32_t states[80][5];
+} SHA1_CTX;
+
+/* Initialize SHA-1 context. */
+void SHA1DCInit(SHA1_CTX*);
+
+/*
+    Function to enable safe SHA-1 hashing:
+    Collision attacks are thwarted by hashing a detected near-collision block 3 times.
+    Think of it as extending SHA-1 from 80 steps to 240 steps for such blocks:
+        The best collision attacks against SHA-1 have complexity about 2^60,
+        so for 240 steps an immediate lower bound for the best cryptanalytic attack would be 2^180.
+        An attacker would be better off using a generic birthday search of complexity 2^80.
+
+   Enabling safe SHA-1 hashing will result in the correct SHA-1 hash for messages where no collision attack was detected,
+   but it will result in a different SHA-1 hash for messages where a collision attack was detected.
+   This will automatically invalidate SHA-1 based digital signature forgeries.
+   Enabled by default.
+*/
+void SHA1DCSetSafeHash(SHA1_CTX*, int);
+
+/*
+    Function to disable or enable the use of Unavoidable Bitconditions (provides a significant speedup).
+    Enabled by default.
+ */
+void SHA1DCSetUseUBC(SHA1_CTX*, int);
+
+/*
+    Function to disable or enable the use of Collision Detection.
+    Enabled by default.
+ */
+void SHA1DCSetUseDetectColl(SHA1_CTX*, int);
+
+/* Function to disable or enable the detection of reduced-round SHA-1 collisions. */
+/* Disabled by default. */
+void SHA1DCSetDetectReducedRoundCollision(SHA1_CTX*, int);
+
+/* Function to set a callback function; pass NULL to disable. */
+/* No callback is set by default. */
+void SHA1DCSetCallback(SHA1_CTX*, collision_block_callback);
+
+/* Update the SHA-1 context with the buffer contents. */
+void SHA1DCUpdate(SHA1_CTX*, const char*, size_t);
+
+/* Obtain the SHA-1 hash from the SHA-1 context. */
+/* Returns: 0 = no collision detected, otherwise = collision found => warn the user of an active attack. */
+int  SHA1DCFinal(unsigned char[20], SHA1_CTX*);
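+
+/*
+   Typical call sequence (an illustrative sketch, not part of the upstream
+   API; 'buffer' and 'size' stand for caller-supplied input):
+
+     SHA1_CTX ctx;
+     unsigned char hash[20];
+     SHA1DCInit(&ctx);
+     SHA1DCUpdate(&ctx, buffer, size);
+     if (SHA1DCFinal(hash, &ctx)) {
+         // collision detected: treat the input as a crafted message
+     } else {
+         // hash[] holds the ordinary SHA-1 digest
+     }
+*/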
+
+#if defined(__cplusplus)
+}
+#endif
+
+#ifdef SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_H
+#include SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_H
+#endif
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/sha1dc/lib/ubc_check.c	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,374 @@
+/***
+* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
+* Distributed under the MIT Software License.
+* See accompanying file LICENSE.txt or copy at
+* https://opensource.org/licenses/MIT
+***/
+
+/*
+// this file was generated by the 'parse_bitrel' program in the tools section
+// using the data files from directory 'tools/data/3565'
+//
+// sha1_dvs contains a list of SHA-1 Disturbance Vectors (DV) to check
+// dvType, dvK and dvB define the DV: I(K,B) or II(K,B) (see the paper)
+// dm[80] is the expanded message block XOR-difference defined by the DV
+// testt is the step to do the recompression from for collision detection
+// maski and maskb define the bit to check for each DV in the dvmask returned by ubc_check
+//
+// ubc_check takes as input an expanded message block and verifies the unavoidable bitconditions for all listed DVs
+// it returns a dvmask where each bit belonging to a DV is set if all unavoidable bitconditions for that DV have been met
+// thus one needs to do the recompression check for each DV that has its bit set
+//
+// ubc_check is programmatically generated and the unavoidable bitconditions have been hardcoded
+// a directly verifiable version named ubc_check_verify can be found in ubc_check_verify.c
+// ubc_check has been verified against ubc_check_verify using the 'ubc_check_test' program in the tools section
+*/
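+
+/*
+   Illustrative caller sketch (it mirrors sha1_process() in sha1.c; 'me'
+   stands for an assumed 80-word expanded message block):
+
+     uint32_t dvmask[DVMASKSIZE];
+     unsigned i;
+     ubc_check(me, dvmask);
+     for (i = 0; sha1_dvs[i].dvType != 0; ++i)
+         if (dvmask[0] & ((uint32_t)(1) << sha1_dvs[i].maskb)) {
+             // all unavoidable bitconditions hold for this DV:
+             // run the recompression check for sha1_dvs[i]
+         }
+*/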
+
+#ifndef SHA1DC_NO_STANDARD_INCLUDES
+#if !defined(_MSC_VER) || _MSC_VER >= 1600
+#include <stdint.h>
+#endif
+#endif
+#ifdef SHA1DC_CUSTOM_INCLUDE_UBC_CHECK_C
+#include SHA1DC_CUSTOM_INCLUDE_UBC_CHECK_C
+#endif
+#include "ubc_check.h"
+
+static const uint32_t DV_I_43_0_bit 	= (uint32_t)(1) << 0;
+static const uint32_t DV_I_44_0_bit 	= (uint32_t)(1) << 1;
+static const uint32_t DV_I_45_0_bit 	= (uint32_t)(1) << 2;
+static const uint32_t DV_I_46_0_bit 	= (uint32_t)(1) << 3;
+static const uint32_t DV_I_46_2_bit 	= (uint32_t)(1) << 4;
+static const uint32_t DV_I_47_0_bit 	= (uint32_t)(1) << 5;
+static const uint32_t DV_I_47_2_bit 	= (uint32_t)(1) << 6;
+static const uint32_t DV_I_48_0_bit 	= (uint32_t)(1) << 7;
+static const uint32_t DV_I_48_2_bit 	= (uint32_t)(1) << 8;
+static const uint32_t DV_I_49_0_bit 	= (uint32_t)(1) << 9;
+static const uint32_t DV_I_49_2_bit 	= (uint32_t)(1) << 10;
+static const uint32_t DV_I_50_0_bit 	= (uint32_t)(1) << 11;
+static const uint32_t DV_I_50_2_bit 	= (uint32_t)(1) << 12;
+static const uint32_t DV_I_51_0_bit 	= (uint32_t)(1) << 13;
+static const uint32_t DV_I_51_2_bit 	= (uint32_t)(1) << 14;
+static const uint32_t DV_I_52_0_bit 	= (uint32_t)(1) << 15;
+static const uint32_t DV_II_45_0_bit 	= (uint32_t)(1) << 16;
+static const uint32_t DV_II_46_0_bit 	= (uint32_t)(1) << 17;
+static const uint32_t DV_II_46_2_bit 	= (uint32_t)(1) << 18;
+static const uint32_t DV_II_47_0_bit 	= (uint32_t)(1) << 19;
+static const uint32_t DV_II_48_0_bit 	= (uint32_t)(1) << 20;
+static const uint32_t DV_II_49_0_bit 	= (uint32_t)(1) << 21;
+static const uint32_t DV_II_49_2_bit 	= (uint32_t)(1) << 22;
+static const uint32_t DV_II_50_0_bit 	= (uint32_t)(1) << 23;
+static const uint32_t DV_II_50_2_bit 	= (uint32_t)(1) << 24;
+static const uint32_t DV_II_51_0_bit 	= (uint32_t)(1) << 25;
+static const uint32_t DV_II_51_2_bit 	= (uint32_t)(1) << 26;
+static const uint32_t DV_II_52_0_bit 	= (uint32_t)(1) << 27;
+static const uint32_t DV_II_53_0_bit 	= (uint32_t)(1) << 28;
+static const uint32_t DV_II_54_0_bit 	= (uint32_t)(1) << 29;
+static const uint32_t DV_II_55_0_bit 	= (uint32_t)(1) << 30;
+static const uint32_t DV_II_56_0_bit 	= (uint32_t)(1) << 31;
+
+dv_info_t sha1_dvs[] =
+{
+  {1,43,0,58,0,0, { 0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012,0x80000202,0x00000018,0x00000164,0x00000408,0x800000e6,0x8000004c,0x00000803,0x80000161,0x80000599 } }
+, {1,44,0,58,0,1, { 0xb4000008,0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012,0x80000202,0x00000018,0x00000164,0x00000408,0x800000e6,0x8000004c,0x00000803,0x80000161 } }
+, {1,45,0,58,0,2, { 0xf4000014,0xb4000008,0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012,0x80000202,0x00000018,0x00000164,0x00000408,0x800000e6,0x8000004c,0x00000803 } }
+, {1,46,0,58,0,3, { 0x2c000010,0xf4000014,0xb4000008,0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012,0x80000202,0x00000018,0x00000164,0x00000408,0x800000e6,0x8000004c } }
+, {1,46,2,58,0,4, { 0xb0000040,0xd0000053,0xd0000022,0x20000000,0x60000032,0x60000043,0x20000040,0xe0000042,0x60000002,0x80000001,0x00000020,0x00000003,0x40000052,0x40000040,0xe0000052,0xa0000000,0x80000040,0x20000001,0x20000060,0x80000001,0x40000042,0xc0000043,0x40000022,0x00000003,0x40000042,0xc0000043,0xc0000022,0x00000001,0x40000002,0xc0000043,0x40000062,0x80000001,0x40000042,0x40000042,0x40000002,0x00000002,0x00000040,0x80000002,0x80000000,0x80000002,0x80000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000000,0x00000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000101,0x00000009,0x00000012,0x00000202,0x0000001a,0x00000124,0x0000040c,0x00000026,0x0000004a,0x0000080a,0x00000060,0x00000590,0x00001020,0x0000039a,0x00000132 } }
+, {1,47,0,58,0,5, { 0xc8000010,0x2c000010,0xf4000014,0xb4000008,0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012,0x80000202,0x00000018,0x00000164,0x00000408,0x800000e6 } }
+, {1,47,2,58,0,6, { 0x20000043,0xb0000040,0xd0000053,0xd0000022,0x20000000,0x60000032,0x60000043,0x20000040,0xe0000042,0x60000002,0x80000001,0x00000020,0x00000003,0x40000052,0x40000040,0xe0000052,0xa0000000,0x80000040,0x20000001,0x20000060,0x80000001,0x40000042,0xc0000043,0x40000022,0x00000003,0x40000042,0xc0000043,0xc0000022,0x00000001,0x40000002,0xc0000043,0x40000062,0x80000001,0x40000042,0x40000042,0x40000002,0x00000002,0x00000040,0x80000002,0x80000000,0x80000002,0x80000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000000,0x00000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000101,0x00000009,0x00000012,0x00000202,0x0000001a,0x00000124,0x0000040c,0x00000026,0x0000004a,0x0000080a,0x00000060,0x00000590,0x00001020,0x0000039a } }
+, {1,48,0,58,0,7, { 0xb800000a,0xc8000010,0x2c000010,0xf4000014,0xb4000008,0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012,0x80000202,0x00000018,0x00000164,0x00000408 } }
+, {1,48,2,58,0,8, { 0xe000002a,0x20000043,0xb0000040,0xd0000053,0xd0000022,0x20000000,0x60000032,0x60000043,0x20000040,0xe0000042,0x60000002,0x80000001,0x00000020,0x00000003,0x40000052,0x40000040,0xe0000052,0xa0000000,0x80000040,0x20000001,0x20000060,0x80000001,0x40000042,0xc0000043,0x40000022,0x00000003,0x40000042,0xc0000043,0xc0000022,0x00000001,0x40000002,0xc0000043,0x40000062,0x80000001,0x40000042,0x40000042,0x40000002,0x00000002,0x00000040,0x80000002,0x80000000,0x80000002,0x80000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000000,0x00000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000101,0x00000009,0x00000012,0x00000202,0x0000001a,0x00000124,0x0000040c,0x00000026,0x0000004a,0x0000080a,0x00000060,0x00000590,0x00001020 } }
+, {1,49,0,58,0,9, { 0x18000000,0xb800000a,0xc8000010,0x2c000010,0xf4000014,0xb4000008,0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012,0x80000202,0x00000018,0x00000164 } }
+, {1,49,2,58,0,10, { 0x60000000,0xe000002a,0x20000043,0xb0000040,0xd0000053,0xd0000022,0x20000000,0x60000032,0x60000043,0x20000040,0xe0000042,0x60000002,0x80000001,0x00000020,0x00000003,0x40000052,0x40000040,0xe0000052,0xa0000000,0x80000040,0x20000001,0x20000060,0x80000001,0x40000042,0xc0000043,0x40000022,0x00000003,0x40000042,0xc0000043,0xc0000022,0x00000001,0x40000002,0xc0000043,0x40000062,0x80000001,0x40000042,0x40000042,0x40000002,0x00000002,0x00000040,0x80000002,0x80000000,0x80000002,0x80000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000000,0x00000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000101,0x00000009,0x00000012,0x00000202,0x0000001a,0x00000124,0x0000040c,0x00000026,0x0000004a,0x0000080a,0x00000060,0x00000590 } }
+, {1,50,0,65,0,11, { 0x0800000c,0x18000000,0xb800000a,0xc8000010,0x2c000010,0xf4000014,0xb4000008,0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012,0x80000202,0x00000018 } }
+, {1,50,2,65,0,12, { 0x20000030,0x60000000,0xe000002a,0x20000043,0xb0000040,0xd0000053,0xd0000022,0x20000000,0x60000032,0x60000043,0x20000040,0xe0000042,0x60000002,0x80000001,0x00000020,0x00000003,0x40000052,0x40000040,0xe0000052,0xa0000000,0x80000040,0x20000001,0x20000060,0x80000001,0x40000042,0xc0000043,0x40000022,0x00000003,0x40000042,0xc0000043,0xc0000022,0x00000001,0x40000002,0xc0000043,0x40000062,0x80000001,0x40000042,0x40000042,0x40000002,0x00000002,0x00000040,0x80000002,0x80000000,0x80000002,0x80000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000000,0x00000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000101,0x00000009,0x00000012,0x00000202,0x0000001a,0x00000124,0x0000040c,0x00000026,0x0000004a,0x0000080a,0x00000060 } }
+, {1,51,0,65,0,13, { 0xe8000000,0x0800000c,0x18000000,0xb800000a,0xc8000010,0x2c000010,0xf4000014,0xb4000008,0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012,0x80000202 } }
+, {1,51,2,65,0,14, { 0xa0000003,0x20000030,0x60000000,0xe000002a,0x20000043,0xb0000040,0xd0000053,0xd0000022,0x20000000,0x60000032,0x60000043,0x20000040,0xe0000042,0x60000002,0x80000001,0x00000020,0x00000003,0x40000052,0x40000040,0xe0000052,0xa0000000,0x80000040,0x20000001,0x20000060,0x80000001,0x40000042,0xc0000043,0x40000022,0x00000003,0x40000042,0xc0000043,0xc0000022,0x00000001,0x40000002,0xc0000043,0x40000062,0x80000001,0x40000042,0x40000042,0x40000002,0x00000002,0x00000040,0x80000002,0x80000000,0x80000002,0x80000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000000,0x00000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000101,0x00000009,0x00000012,0x00000202,0x0000001a,0x00000124,0x0000040c,0x00000026,0x0000004a,0x0000080a } }
+, {1,52,0,65,0,15, { 0x04000010,0xe8000000,0x0800000c,0x18000000,0xb800000a,0xc8000010,0x2c000010,0xf4000014,0xb4000008,0x08000000,0x9800000c,0xd8000010,0x08000010,0xb8000010,0x98000000,0x60000000,0x00000008,0xc0000000,0x90000014,0x10000010,0xb8000014,0x28000000,0x20000010,0x48000000,0x08000018,0x60000000,0x90000010,0xf0000010,0x90000008,0xc0000000,0x90000010,0xf0000010,0xb0000008,0x40000000,0x90000000,0xf0000010,0x90000018,0x60000000,0x90000010,0x90000010,0x90000000,0x80000000,0x00000010,0xa0000000,0x20000000,0xa0000000,0x20000010,0x00000000,0x20000010,0x20000000,0x00000010,0x20000000,0x00000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000040,0x40000002,0x80000004,0x80000080,0x80000006,0x00000049,0x00000103,0x80000009,0x80000012 } }
+, {2,45,0,58,0,16, { 0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107,0x00000089,0x00000014,0x8000024b,0x0000011b,0x8000016d,0x8000041a,0x000002e4,0x80000054,0x00000967 } }
+, {2,46,0,58,0,17, { 0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107,0x00000089,0x00000014,0x8000024b,0x0000011b,0x8000016d,0x8000041a,0x000002e4,0x80000054 } }
+, {2,46,2,58,0,18, { 0x90000070,0xb0000053,0x30000008,0x00000043,0xd0000072,0xb0000010,0xf0000062,0xc0000042,0x00000030,0xe0000042,0x20000060,0xe0000041,0x20000050,0xc0000041,0xe0000072,0xa0000003,0xc0000012,0x60000041,0xc0000032,0x20000001,0xc0000002,0xe0000042,0x60000042,0x80000002,0x00000000,0x00000000,0x80000000,0x00000002,0x00000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000001,0x00000060,0x80000003,0x40000002,0xc0000040,0xc0000002,0x80000000,0x80000000,0x80000002,0x00000040,0x00000002,0x80000000,0x80000000,0x80000000,0x00000002,0x00000040,0x00000000,0x80000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000105,0x00000089,0x00000016,0x0000020b,0x0000011b,0x0000012d,0x0000041e,0x00000224,0x00000050,0x0000092e,0x0000046c,0x000005b6,0x0000106a,0x00000b90,0x00000152 } }
+, {2,47,0,58,0,19, { 0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107,0x00000089,0x00000014,0x8000024b,0x0000011b,0x8000016d,0x8000041a,0x000002e4 } }
+, {2,48,0,58,0,20, { 0xbc00001a,0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107,0x00000089,0x00000014,0x8000024b,0x0000011b,0x8000016d,0x8000041a } }
+, {2,49,0,58,0,21, { 0x3c000004,0xbc00001a,0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107,0x00000089,0x00000014,0x8000024b,0x0000011b,0x8000016d } }
+, {2,49,2,58,0,22, { 0xf0000010,0xf000006a,0x80000040,0x90000070,0xb0000053,0x30000008,0x00000043,0xd0000072,0xb0000010,0xf0000062,0xc0000042,0x00000030,0xe0000042,0x20000060,0xe0000041,0x20000050,0xc0000041,0xe0000072,0xa0000003,0xc0000012,0x60000041,0xc0000032,0x20000001,0xc0000002,0xe0000042,0x60000042,0x80000002,0x00000000,0x00000000,0x80000000,0x00000002,0x00000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000001,0x00000060,0x80000003,0x40000002,0xc0000040,0xc0000002,0x80000000,0x80000000,0x80000002,0x00000040,0x00000002,0x80000000,0x80000000,0x80000000,0x00000002,0x00000040,0x00000000,0x80000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000105,0x00000089,0x00000016,0x0000020b,0x0000011b,0x0000012d,0x0000041e,0x00000224,0x00000050,0x0000092e,0x0000046c,0x000005b6 } }
+, {2,50,0,65,0,23, { 0xb400001c,0x3c000004,0xbc00001a,0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107,0x00000089,0x00000014,0x8000024b,0x0000011b } }
+, {2,50,2,65,0,24, { 0xd0000072,0xf0000010,0xf000006a,0x80000040,0x90000070,0xb0000053,0x30000008,0x00000043,0xd0000072,0xb0000010,0xf0000062,0xc0000042,0x00000030,0xe0000042,0x20000060,0xe0000041,0x20000050,0xc0000041,0xe0000072,0xa0000003,0xc0000012,0x60000041,0xc0000032,0x20000001,0xc0000002,0xe0000042,0x60000042,0x80000002,0x00000000,0x00000000,0x80000000,0x00000002,0x00000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000001,0x00000060,0x80000003,0x40000002,0xc0000040,0xc0000002,0x80000000,0x80000000,0x80000002,0x00000040,0x00000002,0x80000000,0x80000000,0x80000000,0x00000002,0x00000040,0x00000000,0x80000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000105,0x00000089,0x00000016,0x0000020b,0x0000011b,0x0000012d,0x0000041e,0x00000224,0x00000050,0x0000092e,0x0000046c } }
+, {2,51,0,65,0,25, { 0xc0000010,0xb400001c,0x3c000004,0xbc00001a,0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107,0x00000089,0x00000014,0x8000024b } }
+, {2,51,2,65,0,26, { 0x00000043,0xd0000072,0xf0000010,0xf000006a,0x80000040,0x90000070,0xb0000053,0x30000008,0x00000043,0xd0000072,0xb0000010,0xf0000062,0xc0000042,0x00000030,0xe0000042,0x20000060,0xe0000041,0x20000050,0xc0000041,0xe0000072,0xa0000003,0xc0000012,0x60000041,0xc0000032,0x20000001,0xc0000002,0xe0000042,0x60000042,0x80000002,0x00000000,0x00000000,0x80000000,0x00000002,0x00000040,0x00000000,0x80000040,0x80000000,0x00000040,0x80000001,0x00000060,0x80000003,0x40000002,0xc0000040,0xc0000002,0x80000000,0x80000000,0x80000002,0x00000040,0x00000002,0x80000000,0x80000000,0x80000000,0x00000002,0x00000040,0x00000000,0x80000040,0x80000002,0x00000000,0x80000000,0x80000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000004,0x00000080,0x00000004,0x00000009,0x00000105,0x00000089,0x00000016,0x0000020b,0x0000011b,0x0000012d,0x0000041e,0x00000224,0x00000050,0x0000092e } }
+, {2,52,0,65,0,27, { 0x0c000002,0xc0000010,0xb400001c,0x3c000004,0xbc00001a,0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107,0x00000089,0x00000014 } }
+, {2,53,0,65,0,28, { 0xcc000014,0x0c000002,0xc0000010,0xb400001c,0x3c000004,0xbc00001a,0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107,0x00000089 } }
+, {2,54,0,65,0,29, { 0x0400001c,0xcc000014,0x0c000002,0xc0000010,0xb400001c,0x3c000004,0xbc00001a,0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b,0x80000107 } }
+, {2,55,0,65,0,30, { 0x00000010,0x0400001c,0xcc000014,0x0c000002,0xc0000010,0xb400001c,0x3c000004,0xbc00001a,0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046,0x4000004b } }
+, {2,56,0,65,0,31, { 0x2600001a,0x00000010,0x0400001c,0xcc000014,0x0c000002,0xc0000010,0xb400001c,0x3c000004,0xbc00001a,0x20000010,0x2400001c,0xec000014,0x0c000002,0xc0000010,0xb400001c,0x2c000004,0xbc000018,0xb0000010,0x0000000c,0xb8000010,0x08000018,0x78000010,0x08000014,0x70000010,0xb800001c,0xe8000000,0xb0000004,0x58000010,0xb000000c,0x48000000,0xb0000000,0xb8000010,0x98000010,0xa0000000,0x00000000,0x00000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0x20000000,0x00000010,0x60000000,0x00000018,0xe0000000,0x90000000,0x30000010,0xb0000000,0x20000000,0x20000000,0xa0000000,0x00000010,0x80000000,0x20000000,0x20000000,0x20000000,0x80000000,0x00000010,0x00000000,0x20000010,0xa0000000,0x00000000,0x20000000,0x20000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000001,0x00000020,0x00000001,0x40000002,0x40000041,0x40000022,0x80000005,0xc0000082,0xc0000046 } }
+, {0,0,0,0,0,0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}
+};
+void ubc_check(const uint32_t W[80], uint32_t dvmask[1])
+{
+	uint32_t mask = ~((uint32_t)(0));
+	mask &= (((((W[44]^W[45])>>29)&1)-1) | ~(DV_I_48_0_bit|DV_I_51_0_bit|DV_I_52_0_bit|DV_II_45_0_bit|DV_II_46_0_bit|DV_II_50_0_bit|DV_II_51_0_bit));
+	mask &= (((((W[49]^W[50])>>29)&1)-1) | ~(DV_I_46_0_bit|DV_II_45_0_bit|DV_II_50_0_bit|DV_II_51_0_bit|DV_II_55_0_bit|DV_II_56_0_bit));
+	mask &= (((((W[48]^W[49])>>29)&1)-1) | ~(DV_I_45_0_bit|DV_I_52_0_bit|DV_II_49_0_bit|DV_II_50_0_bit|DV_II_54_0_bit|DV_II_55_0_bit));
+	mask &= ((((W[47]^(W[50]>>25))&(1<<4))-(1<<4)) | ~(DV_I_47_0_bit|DV_I_49_0_bit|DV_I_51_0_bit|DV_II_45_0_bit|DV_II_51_0_bit|DV_II_56_0_bit));
+	mask &= (((((W[47]^W[48])>>29)&1)-1) | ~(DV_I_44_0_bit|DV_I_51_0_bit|DV_II_48_0_bit|DV_II_49_0_bit|DV_II_53_0_bit|DV_II_54_0_bit));
+	mask &= (((((W[46]>>4)^(W[49]>>29))&1)-1) | ~(DV_I_46_0_bit|DV_I_48_0_bit|DV_I_50_0_bit|DV_I_52_0_bit|DV_II_50_0_bit|DV_II_55_0_bit));
+	mask &= (((((W[46]^W[47])>>29)&1)-1) | ~(DV_I_43_0_bit|DV_I_50_0_bit|DV_II_47_0_bit|DV_II_48_0_bit|DV_II_52_0_bit|DV_II_53_0_bit));
+	mask &= (((((W[45]>>4)^(W[48]>>29))&1)-1) | ~(DV_I_45_0_bit|DV_I_47_0_bit|DV_I_49_0_bit|DV_I_51_0_bit|DV_II_49_0_bit|DV_II_54_0_bit));
+	mask &= (((((W[45]^W[46])>>29)&1)-1) | ~(DV_I_49_0_bit|DV_I_52_0_bit|DV_II_46_0_bit|DV_II_47_0_bit|DV_II_51_0_bit|DV_II_52_0_bit));
+	mask &= (((((W[44]>>4)^(W[47]>>29))&1)-1) | ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_I_48_0_bit|DV_I_50_0_bit|DV_II_48_0_bit|DV_II_53_0_bit));
+	mask &= (((((W[43]>>4)^(W[46]>>29))&1)-1) | ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_I_47_0_bit|DV_I_49_0_bit|DV_II_47_0_bit|DV_II_52_0_bit));
+	mask &= (((((W[43]^W[44])>>29)&1)-1) | ~(DV_I_47_0_bit|DV_I_50_0_bit|DV_I_51_0_bit|DV_II_45_0_bit|DV_II_49_0_bit|DV_II_50_0_bit));
+	mask &= (((((W[42]>>4)^(W[45]>>29))&1)-1) | ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_I_48_0_bit|DV_I_52_0_bit|DV_II_46_0_bit|DV_II_51_0_bit));
+	mask &= (((((W[41]>>4)^(W[44]>>29))&1)-1) | ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_I_47_0_bit|DV_I_51_0_bit|DV_II_45_0_bit|DV_II_50_0_bit));
+	mask &= (((((W[40]^W[41])>>29)&1)-1) | ~(DV_I_44_0_bit|DV_I_47_0_bit|DV_I_48_0_bit|DV_II_46_0_bit|DV_II_47_0_bit|DV_II_56_0_bit));
+	mask &= (((((W[54]^W[55])>>29)&1)-1) | ~(DV_I_51_0_bit|DV_II_47_0_bit|DV_II_50_0_bit|DV_II_55_0_bit|DV_II_56_0_bit));
+	mask &= (((((W[53]^W[54])>>29)&1)-1) | ~(DV_I_50_0_bit|DV_II_46_0_bit|DV_II_49_0_bit|DV_II_54_0_bit|DV_II_55_0_bit));
+	mask &= (((((W[52]^W[53])>>29)&1)-1) | ~(DV_I_49_0_bit|DV_II_45_0_bit|DV_II_48_0_bit|DV_II_53_0_bit|DV_II_54_0_bit));
+	mask &= ((((W[50]^(W[53]>>25))&(1<<4))-(1<<4)) | ~(DV_I_50_0_bit|DV_I_52_0_bit|DV_II_46_0_bit|DV_II_48_0_bit|DV_II_54_0_bit));
+	mask &= (((((W[50]^W[51])>>29)&1)-1) | ~(DV_I_47_0_bit|DV_II_46_0_bit|DV_II_51_0_bit|DV_II_52_0_bit|DV_II_56_0_bit));
+	mask &= ((((W[49]^(W[52]>>25))&(1<<4))-(1<<4)) | ~(DV_I_49_0_bit|DV_I_51_0_bit|DV_II_45_0_bit|DV_II_47_0_bit|DV_II_53_0_bit));
+	mask &= ((((W[48]^(W[51]>>25))&(1<<4))-(1<<4)) | ~(DV_I_48_0_bit|DV_I_50_0_bit|DV_I_52_0_bit|DV_II_46_0_bit|DV_II_52_0_bit));
+	mask &= (((((W[42]^W[43])>>29)&1)-1) | ~(DV_I_46_0_bit|DV_I_49_0_bit|DV_I_50_0_bit|DV_II_48_0_bit|DV_II_49_0_bit));
+	mask &= (((((W[41]^W[42])>>29)&1)-1) | ~(DV_I_45_0_bit|DV_I_48_0_bit|DV_I_49_0_bit|DV_II_47_0_bit|DV_II_48_0_bit));
+	mask &= (((((W[40]>>4)^(W[43]>>29))&1)-1) | ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_I_50_0_bit|DV_II_49_0_bit|DV_II_56_0_bit));
+	mask &= (((((W[39]>>4)^(W[42]>>29))&1)-1) | ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_I_49_0_bit|DV_II_48_0_bit|DV_II_55_0_bit));
+	if (mask & (DV_I_44_0_bit|DV_I_48_0_bit|DV_II_47_0_bit|DV_II_54_0_bit|DV_II_56_0_bit))
+		mask &= (((((W[38]>>4)^(W[41]>>29))&1)-1) | ~(DV_I_44_0_bit|DV_I_48_0_bit|DV_II_47_0_bit|DV_II_54_0_bit|DV_II_56_0_bit));
+	mask &= (((((W[37]>>4)^(W[40]>>29))&1)-1) | ~(DV_I_43_0_bit|DV_I_47_0_bit|DV_II_46_0_bit|DV_II_53_0_bit|DV_II_55_0_bit));
+	if (mask & (DV_I_52_0_bit|DV_II_48_0_bit|DV_II_51_0_bit|DV_II_56_0_bit))
+		mask &= (((((W[55]^W[56])>>29)&1)-1) | ~(DV_I_52_0_bit|DV_II_48_0_bit|DV_II_51_0_bit|DV_II_56_0_bit));
+	if (mask & (DV_I_52_0_bit|DV_II_48_0_bit|DV_II_50_0_bit|DV_II_56_0_bit))
+		mask &= ((((W[52]^(W[55]>>25))&(1<<4))-(1<<4)) | ~(DV_I_52_0_bit|DV_II_48_0_bit|DV_II_50_0_bit|DV_II_56_0_bit));
+	if (mask & (DV_I_51_0_bit|DV_II_47_0_bit|DV_II_49_0_bit|DV_II_55_0_bit))
+		mask &= ((((W[51]^(W[54]>>25))&(1<<4))-(1<<4)) | ~(DV_I_51_0_bit|DV_II_47_0_bit|DV_II_49_0_bit|DV_II_55_0_bit));
+	if (mask & (DV_I_48_0_bit|DV_II_47_0_bit|DV_II_52_0_bit|DV_II_53_0_bit))
+		mask &= (((((W[51]^W[52])>>29)&1)-1) | ~(DV_I_48_0_bit|DV_II_47_0_bit|DV_II_52_0_bit|DV_II_53_0_bit));
+	if (mask & (DV_I_46_0_bit|DV_I_49_0_bit|DV_II_45_0_bit|DV_II_48_0_bit))
+		mask &= (((((W[36]>>4)^(W[40]>>29))&1)-1) | ~(DV_I_46_0_bit|DV_I_49_0_bit|DV_II_45_0_bit|DV_II_48_0_bit));
+	if (mask & (DV_I_52_0_bit|DV_II_48_0_bit|DV_II_49_0_bit))
+		mask &= ((0-(((W[53]^W[56])>>29)&1)) | ~(DV_I_52_0_bit|DV_II_48_0_bit|DV_II_49_0_bit));
+	if (mask & (DV_I_50_0_bit|DV_II_46_0_bit|DV_II_47_0_bit))
+		mask &= ((0-(((W[51]^W[54])>>29)&1)) | ~(DV_I_50_0_bit|DV_II_46_0_bit|DV_II_47_0_bit));
+	if (mask & (DV_I_49_0_bit|DV_I_51_0_bit|DV_II_45_0_bit))
+		mask &= ((0-(((W[50]^W[52])>>29)&1)) | ~(DV_I_49_0_bit|DV_I_51_0_bit|DV_II_45_0_bit));
+	if (mask & (DV_I_48_0_bit|DV_I_50_0_bit|DV_I_52_0_bit))
+		mask &= ((0-(((W[49]^W[51])>>29)&1)) | ~(DV_I_48_0_bit|DV_I_50_0_bit|DV_I_52_0_bit));
+	if (mask & (DV_I_47_0_bit|DV_I_49_0_bit|DV_I_51_0_bit))
+		mask &= ((0-(((W[48]^W[50])>>29)&1)) | ~(DV_I_47_0_bit|DV_I_49_0_bit|DV_I_51_0_bit));
+	if (mask & (DV_I_46_0_bit|DV_I_48_0_bit|DV_I_50_0_bit))
+		mask &= ((0-(((W[47]^W[49])>>29)&1)) | ~(DV_I_46_0_bit|DV_I_48_0_bit|DV_I_50_0_bit));
+	if (mask & (DV_I_45_0_bit|DV_I_47_0_bit|DV_I_49_0_bit))
+		mask &= ((0-(((W[46]^W[48])>>29)&1)) | ~(DV_I_45_0_bit|DV_I_47_0_bit|DV_I_49_0_bit));
+	mask &= ((((W[45]^W[47])&(1<<6))-(1<<6)) | ~(DV_I_47_2_bit|DV_I_49_2_bit|DV_I_51_2_bit));
+	if (mask & (DV_I_44_0_bit|DV_I_46_0_bit|DV_I_48_0_bit))
+		mask &= ((0-(((W[45]^W[47])>>29)&1)) | ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_I_48_0_bit));
+	mask &= (((((W[44]^W[46])>>6)&1)-1) | ~(DV_I_46_2_bit|DV_I_48_2_bit|DV_I_50_2_bit));
+	if (mask & (DV_I_43_0_bit|DV_I_45_0_bit|DV_I_47_0_bit))
+		mask &= ((0-(((W[44]^W[46])>>29)&1)) | ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_I_47_0_bit));
+	mask &= ((0-((W[41]^(W[42]>>5))&(1<<1))) | ~(DV_I_48_2_bit|DV_II_46_2_bit|DV_II_51_2_bit));
+	mask &= ((0-((W[40]^(W[41]>>5))&(1<<1))) | ~(DV_I_47_2_bit|DV_I_51_2_bit|DV_II_50_2_bit));
+	if (mask & (DV_I_44_0_bit|DV_I_46_0_bit|DV_II_56_0_bit))
+		mask &= ((0-(((W[40]^W[42])>>4)&1)) | ~(DV_I_44_0_bit|DV_I_46_0_bit|DV_II_56_0_bit));
+	mask &= ((0-((W[39]^(W[40]>>5))&(1<<1))) | ~(DV_I_46_2_bit|DV_I_50_2_bit|DV_II_49_2_bit));
+	if (mask & (DV_I_43_0_bit|DV_I_45_0_bit|DV_II_55_0_bit))
+		mask &= ((0-(((W[39]^W[41])>>4)&1)) | ~(DV_I_43_0_bit|DV_I_45_0_bit|DV_II_55_0_bit));
+	if (mask & (DV_I_44_0_bit|DV_II_54_0_bit|DV_II_56_0_bit))
+		mask &= ((0-(((W[38]^W[40])>>4)&1)) | ~(DV_I_44_0_bit|DV_II_54_0_bit|DV_II_56_0_bit));
+	if (mask & (DV_I_43_0_bit|DV_II_53_0_bit|DV_II_55_0_bit))
+		mask &= ((0-(((W[37]^W[39])>>4)&1)) | ~(DV_I_43_0_bit|DV_II_53_0_bit|DV_II_55_0_bit));
+	mask &= ((0-((W[36]^(W[37]>>5))&(1<<1))) | ~(DV_I_47_2_bit|DV_I_50_2_bit|DV_II_46_2_bit));
+	if (mask & (DV_I_45_0_bit|DV_I_48_0_bit|DV_II_47_0_bit))
+		mask &= (((((W[35]>>4)^(W[39]>>29))&1)-1) | ~(DV_I_45_0_bit|DV_I_48_0_bit|DV_II_47_0_bit));
+	if (mask & (DV_I_48_0_bit|DV_II_48_0_bit))
+		mask &= ((0-((W[63]^(W[64]>>5))&(1<<0))) | ~(DV_I_48_0_bit|DV_II_48_0_bit));
+	if (mask & (DV_I_45_0_bit|DV_II_45_0_bit))
+		mask &= ((0-((W[63]^(W[64]>>5))&(1<<1))) | ~(DV_I_45_0_bit|DV_II_45_0_bit));
+	if (mask & (DV_I_47_0_bit|DV_II_47_0_bit))
+		mask &= ((0-((W[62]^(W[63]>>5))&(1<<0))) | ~(DV_I_47_0_bit|DV_II_47_0_bit));
+	if (mask & (DV_I_46_0_bit|DV_II_46_0_bit))
+		mask &= ((0-((W[61]^(W[62]>>5))&(1<<0))) | ~(DV_I_46_0_bit|DV_II_46_0_bit));
+	mask &= ((0-((W[61]^(W[62]>>5))&(1<<2))) | ~(DV_I_46_2_bit|DV_II_46_2_bit));
+	if (mask & (DV_I_45_0_bit|DV_II_45_0_bit))
+		mask &= ((0-((W[60]^(W[61]>>5))&(1<<0))) | ~(DV_I_45_0_bit|DV_II_45_0_bit));
+	if (mask & (DV_II_51_0_bit|DV_II_54_0_bit))
+		mask &= (((((W[58]^W[59])>>29)&1)-1) | ~(DV_II_51_0_bit|DV_II_54_0_bit));
+	if (mask & (DV_II_50_0_bit|DV_II_53_0_bit))
+		mask &= (((((W[57]^W[58])>>29)&1)-1) | ~(DV_II_50_0_bit|DV_II_53_0_bit));
+	if (mask & (DV_II_52_0_bit|DV_II_54_0_bit))
+		mask &= ((((W[56]^(W[59]>>25))&(1<<4))-(1<<4)) | ~(DV_II_52_0_bit|DV_II_54_0_bit));
+	if (mask & (DV_II_51_0_bit|DV_II_52_0_bit))
+		mask &= ((0-(((W[56]^W[59])>>29)&1)) | ~(DV_II_51_0_bit|DV_II_52_0_bit));
+	if (mask & (DV_II_49_0_bit|DV_II_52_0_bit))
+		mask &= (((((W[56]^W[57])>>29)&1)-1) | ~(DV_II_49_0_bit|DV_II_52_0_bit));
+	if (mask & (DV_II_51_0_bit|DV_II_53_0_bit))
+		mask &= ((((W[55]^(W[58]>>25))&(1<<4))-(1<<4)) | ~(DV_II_51_0_bit|DV_II_53_0_bit));
+	if (mask & (DV_II_50_0_bit|DV_II_52_0_bit))
+		mask &= ((((W[54]^(W[57]>>25))&(1<<4))-(1<<4)) | ~(DV_II_50_0_bit|DV_II_52_0_bit));
+	if (mask & (DV_II_49_0_bit|DV_II_51_0_bit))
+		mask &= ((((W[53]^(W[56]>>25))&(1<<4))-(1<<4)) | ~(DV_II_49_0_bit|DV_II_51_0_bit));
+	mask &= ((((W[51]^(W[50]>>5))&(1<<1))-(1<<1)) | ~(DV_I_50_2_bit|DV_II_46_2_bit));
+	mask &= ((((W[48]^W[50])&(1<<6))-(1<<6)) | ~(DV_I_50_2_bit|DV_II_46_2_bit));
+	if (mask & (DV_I_51_0_bit|DV_I_52_0_bit))
+		mask &= ((0-(((W[48]^W[55])>>29)&1)) | ~(DV_I_51_0_bit|DV_I_52_0_bit));
+	mask &= ((((W[47]^W[49])&(1<<6))-(1<<6)) | ~(DV_I_49_2_bit|DV_I_51_2_bit));
+	mask &= ((((W[48]^(W[47]>>5))&(1<<1))-(1<<1)) | ~(DV_I_47_2_bit|DV_II_51_2_bit));
+	mask &= ((((W[46]^W[48])&(1<<6))-(1<<6)) | ~(DV_I_48_2_bit|DV_I_50_2_bit));
+	mask &= ((((W[47]^(W[46]>>5))&(1<<1))-(1<<1)) | ~(DV_I_46_2_bit|DV_II_50_2_bit));
+	mask &= ((0-((W[44]^(W[45]>>5))&(1<<1))) | ~(DV_I_51_2_bit|DV_II_49_2_bit));
+	mask &= ((((W[43]^W[45])&(1<<6))-(1<<6)) | ~(DV_I_47_2_bit|DV_I_49_2_bit));
+	mask &= (((((W[42]^W[44])>>6)&1)-1) | ~(DV_I_46_2_bit|DV_I_48_2_bit));
+	mask &= ((((W[43]^(W[42]>>5))&(1<<1))-(1<<1)) | ~(DV_II_46_2_bit|DV_II_51_2_bit));
+	mask &= ((((W[42]^(W[41]>>5))&(1<<1))-(1<<1)) | ~(DV_I_51_2_bit|DV_II_50_2_bit));
+	mask &= ((((W[41]^(W[40]>>5))&(1<<1))-(1<<1)) | ~(DV_I_50_2_bit|DV_II_49_2_bit));
+	if (mask & (DV_I_52_0_bit|DV_II_51_0_bit))
+		mask &= ((((W[39]^(W[43]>>25))&(1<<4))-(1<<4)) | ~(DV_I_52_0_bit|DV_II_51_0_bit));
+	if (mask & (DV_I_51_0_bit|DV_II_50_0_bit))
+		mask &= ((((W[38]^(W[42]>>25))&(1<<4))-(1<<4)) | ~(DV_I_51_0_bit|DV_II_50_0_bit));
+	if (mask & (DV_I_48_2_bit|DV_I_51_2_bit))
+		mask &= ((0-((W[37]^(W[38]>>5))&(1<<1))) | ~(DV_I_48_2_bit|DV_I_51_2_bit));
+	if (mask & (DV_I_50_0_bit|DV_II_49_0_bit))
+		mask &= ((((W[37]^(W[41]>>25))&(1<<4))-(1<<4)) | ~(DV_I_50_0_bit|DV_II_49_0_bit));
+	if (mask & (DV_II_52_0_bit|DV_II_54_0_bit))
+		mask &= ((0-((W[36]^W[38])&(1<<4))) | ~(DV_II_52_0_bit|DV_II_54_0_bit));
+	mask &= ((0-((W[35]^(W[36]>>5))&(1<<1))) | ~(DV_I_46_2_bit|DV_I_49_2_bit));
+	if (mask & (DV_I_51_0_bit|DV_II_47_0_bit))
+		mask &= ((((W[35]^(W[39]>>25))&(1<<3))-(1<<3)) | ~(DV_I_51_0_bit|DV_II_47_0_bit));
+if (mask) {
+
+	if (mask & DV_I_43_0_bit)
+		 if (
+			    !((W[61]^(W[62]>>5)) & (1<<1))
+			 || !(!((W[59]^(W[63]>>25)) & (1<<5)))
+			 || !((W[58]^(W[63]>>30)) & (1<<0))
+		 )  mask &= ~DV_I_43_0_bit;
+	if (mask & DV_I_44_0_bit)
+		 if (
+			    !((W[62]^(W[63]>>5)) & (1<<1))
+			 || !(!((W[60]^(W[64]>>25)) & (1<<5)))
+			 || !((W[59]^(W[64]>>30)) & (1<<0))
+		 )  mask &= ~DV_I_44_0_bit;
+	if (mask & DV_I_46_2_bit)
+		mask &= ((~((W[40]^W[42])>>2)) | ~DV_I_46_2_bit);
+	if (mask & DV_I_47_2_bit)
+		 if (
+			    !((W[62]^(W[63]>>5)) & (1<<2))
+			 || !(!((W[41]^W[43]) & (1<<6)))
+		 )  mask &= ~DV_I_47_2_bit;
+	if (mask & DV_I_48_2_bit)
+		 if (
+			    !((W[63]^(W[64]>>5)) & (1<<2))
+			 || !(!((W[48]^(W[49]<<5)) & (1<<6)))
+		 )  mask &= ~DV_I_48_2_bit;
+	if (mask & DV_I_49_2_bit)
+		 if (
+			    !(!((W[49]^(W[50]<<5)) & (1<<6)))
+			 || !((W[42]^W[50]) & (1<<1))
+			 || !(!((W[39]^(W[40]<<5)) & (1<<6)))
+			 || !((W[38]^W[40]) & (1<<1))
+		 )  mask &= ~DV_I_49_2_bit;
+	if (mask & DV_I_50_0_bit)
+		mask &= ((((W[36]^W[37])<<7)) | ~DV_I_50_0_bit);
+	if (mask & DV_I_50_2_bit)
+		mask &= ((((W[43]^W[51])<<11)) | ~DV_I_50_2_bit);
+	if (mask & DV_I_51_0_bit)
+		mask &= ((((W[37]^W[38])<<9)) | ~DV_I_51_0_bit);
+	if (mask & DV_I_51_2_bit)
+		 if (
+			    !(!((W[51]^(W[52]<<5)) & (1<<6)))
+			 || !(!((W[49]^W[51]) & (1<<6)))
+			 || !(!((W[37]^(W[37]>>5)) & (1<<1)))
+			 || !(!((W[35]^(W[39]>>25)) & (1<<5)))
+		 )  mask &= ~DV_I_51_2_bit;
+	if (mask & DV_I_52_0_bit)
+		mask &= ((((W[38]^W[39])<<11)) | ~DV_I_52_0_bit);
+	if (mask & DV_II_46_2_bit)
+		mask &= ((((W[47]^W[51])<<17)) | ~DV_II_46_2_bit);
+	if (mask & DV_II_48_0_bit)
+		 if (
+			    !(!((W[36]^(W[40]>>25)) & (1<<3)))
+			 || !((W[35]^(W[40]<<2)) & (1<<30))
+		 )  mask &= ~DV_II_48_0_bit;
+	if (mask & DV_II_49_0_bit)
+		 if (
+			    !(!((W[37]^(W[41]>>25)) & (1<<3)))
+			 || !((W[36]^(W[41]<<2)) & (1<<30))
+		 )  mask &= ~DV_II_49_0_bit;
+	if (mask & DV_II_49_2_bit)
+		 if (
+			    !(!((W[53]^(W[54]<<5)) & (1<<6)))
+			 || !(!((W[51]^W[53]) & (1<<6)))
+			 || !((W[50]^W[54]) & (1<<1))
+			 || !(!((W[45]^(W[46]<<5)) & (1<<6)))
+			 || !(!((W[37]^(W[41]>>25)) & (1<<5)))
+			 || !((W[36]^(W[41]>>30)) & (1<<0))
+		 )  mask &= ~DV_II_49_2_bit;
+	if (mask & DV_II_50_0_bit)
+		 if (
+			    !((W[55]^W[58]) & (1<<29))
+			 || !(!((W[38]^(W[42]>>25)) & (1<<3)))
+			 || !((W[37]^(W[42]<<2)) & (1<<30))
+		 )  mask &= ~DV_II_50_0_bit;
+	if (mask & DV_II_50_2_bit)
+		 if (
+			    !(!((W[54]^(W[55]<<5)) & (1<<6)))
+			 || !(!((W[52]^W[54]) & (1<<6)))
+			 || !((W[51]^W[55]) & (1<<1))
+			 || !((W[45]^W[47]) & (1<<1))
+			 || !(!((W[38]^(W[42]>>25)) & (1<<5)))
+			 || !((W[37]^(W[42]>>30)) & (1<<0))
+		 )  mask &= ~DV_II_50_2_bit;
+	if (mask & DV_II_51_0_bit)
+		 if (
+			    !(!((W[39]^(W[43]>>25)) & (1<<3)))
+			 || !((W[38]^(W[43]<<2)) & (1<<30))
+		 )  mask &= ~DV_II_51_0_bit;
+	if (mask & DV_II_51_2_bit)
+		 if (
+			    !(!((W[55]^(W[56]<<5)) & (1<<6)))
+			 || !(!((W[53]^W[55]) & (1<<6)))
+			 || !((W[52]^W[56]) & (1<<1))
+			 || !((W[46]^W[48]) & (1<<1))
+			 || !(!((W[39]^(W[43]>>25)) & (1<<5)))
+			 || !((W[38]^(W[43]>>30)) & (1<<0))
+		 )  mask &= ~DV_II_51_2_bit;
+	if (mask & DV_II_52_0_bit)
+		 if (
+			    !(!((W[59]^W[60]) & (1<<29)))
+			 || !(!((W[40]^(W[44]>>25)) & (1<<3)))
+			 || !(!((W[40]^(W[44]>>25)) & (1<<4)))
+			 || !((W[39]^(W[44]<<2)) & (1<<30))
+		 )  mask &= ~DV_II_52_0_bit;
+	if (mask & DV_II_53_0_bit)
+		 if (
+			    !((W[58]^W[61]) & (1<<29))
+			 || !(!((W[57]^(W[61]>>25)) & (1<<4)))
+			 || !(!((W[41]^(W[45]>>25)) & (1<<3)))
+			 || !(!((W[41]^(W[45]>>25)) & (1<<4)))
+		 )  mask &= ~DV_II_53_0_bit;
+	if (mask & DV_II_54_0_bit)
+		 if (
+			    !(!((W[58]^(W[62]>>25)) & (1<<4)))
+			 || !(!((W[42]^(W[46]>>25)) & (1<<3)))
+			 || !(!((W[42]^(W[46]>>25)) & (1<<4)))
+		 )  mask &= ~DV_II_54_0_bit;
+	if (mask & DV_II_55_0_bit)
+		 if (
+			    !(!((W[59]^(W[63]>>25)) & (1<<4)))
+			 || !(!((W[57]^(W[59]>>25)) & (1<<4)))
+			 || !(!((W[43]^(W[47]>>25)) & (1<<3)))
+			 || !(!((W[43]^(W[47]>>25)) & (1<<4)))
+		 )  mask &= ~DV_II_55_0_bit;
+	if (mask & DV_II_56_0_bit)
+		 if (
+			    !(!((W[60]^(W[64]>>25)) & (1<<4)))
+			 || !(!((W[44]^(W[48]>>25)) & (1<<3)))
+			 || !(!((W[44]^(W[48]>>25)) & (1<<4)))
+		 )  mask &= ~DV_II_56_0_bit;
+}
+
+	dvmask[0]=mask;
+}
+
+#ifdef SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_C
+#include SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_C
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/sha1dc/lib/ubc_check.h	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,57 @@
+/***
+* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
+* Distributed under the MIT Software License.
+* See accompanying file LICENSE.txt or copy at
+* https://opensource.org/licenses/MIT
+***/
+
+/*
+// this file was generated by the 'parse_bitrel' program in the tools section
+// using the data files from directory 'tools/data/3565'
+//
+// sha1_dvs contains a list of SHA-1 Disturbance Vectors (DV) to check
+// dvType, dvK and dvB define the DV: I(K,B) or II(K,B) (see the paper)
+// dm[80] is the expanded message block XOR-difference defined by the DV
+// testt is the step to do the recompression from for collision detection
+// maski and maskb define the bit to check for each DV in the dvmask returned by ubc_check
+//
+// ubc_check takes as input an expanded message block and verifies the unavoidable bitconditions for all listed DVs
+// it returns a dvmask where each bit belonging to a DV is set if all unavoidable bitconditions for that DV have been met
+// thus one needs to do the recompression check for each DV that has its bit set
+*/
+
+#ifndef SHA1DC_UBC_CHECK_H
+#define SHA1DC_UBC_CHECK_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef SHA1DC_NO_STANDARD_INCLUDES
+#if !defined(_MSC_VER) || _MSC_VER >= 1600
+#include <stdint.h>
+#else
+/* prior to Visual Studio 2010 */
+typedef unsigned __int32 uint32_t;
+#endif
+#endif
+
+#define DVMASKSIZE 1
+typedef struct { int dvType; int dvK; int dvB; int testt; int maski; int maskb; uint32_t dm[80]; } dv_info_t;
+extern dv_info_t sha1_dvs[];
+void ubc_check(const uint32_t W[80], uint32_t dvmask[DVMASKSIZE]);
+
+#define DOSTORESTATE58
+#define DOSTORESTATE65
+
+#define CHECK_DVMASK(_DVMASK) (0 != _DVMASK[0])
+
+#if defined(__cplusplus)
+}
+#endif
+
+#ifdef SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_H
+#include SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_H
+#endif
+
+#endif
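
The comment block in this header spells out the two-stage design: ubc_check()
is a cheap screen over the expanded message block, and only the disturbance
vectors whose bit survives in the returned dvmask warrant the expensive
recompression check. A rough Python sketch of that consumption pattern
(dv_bits and recompress_and_compare are hypothetical names, not part of the
library):

    def collision_suspected(dvmask, dv_bits, recompress_and_compare):
        # dv_bits: {dv_name: bit_position}, mirroring maski/maskb above
        for dv, bit in dv_bits.items():
            if dvmask & (1 << bit):
                # the unavoidable-bit-condition screen could not rule
                # this DV out, so run the full per-DV check
                if recompress_and_compare(dv):
                    return True
        return False
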
--- a/mercurial/transaction.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/transaction.py	Tue Jan 21 13:14:51 2020 -0500
@@ -135,7 +135,7 @@
         validator=None,
         releasefn=None,
         checkambigfiles=None,
-        name=r'<unnamed>',
+        name='<unnamed>',
     ):
         """Begin a new transaction
 
@@ -220,8 +220,8 @@
         self._abortcallback = {}
 
     def __repr__(self):
-        name = r'/'.join(self._names)
-        return r'<transaction name=%s, count=%d, usages=%d>' % (
+        name = '/'.join(self._names)
+        return '<transaction name=%s, count=%d, usages=%d>' % (
             name,
             self._count,
             self._usages,
@@ -414,7 +414,7 @@
         self._file.flush()
 
     @active
-    def nest(self, name=r'<unnamed>'):
+    def nest(self, name='<unnamed>'):
         self._count += 1
         self._usages += 1
         self._names.append(name)
@@ -456,6 +456,12 @@
         return self._anypending
 
     @active
+    def hasfinalize(self, category):
+        """check is a callback already exist for a category
+        """
+        return category in self._finalizecallback
+
+    @active
     def addfinalize(self, category, callback):
         """add a callback to be called when the transaction is closed
 
@@ -500,9 +506,12 @@
             self._validator(self)  # will raise exception if needed
             self._validator = None  # Help prevent cycles.
             self._generatefiles(group=gengroupprefinalize)
-            categories = sorted(self._finalizecallback)
-            for cat in categories:
-                self._finalizecallback[cat](self)
+            while self._finalizecallback:
+                callbacks = self._finalizecallback
+                self._finalizecallback = {}
+                categories = sorted(callbacks)
+                for cat in categories:
+                    callbacks[cat](self)
             # Prevent double usage and help clear cycles.
             self._finalizecallback = None
             self._generatefiles(group=gengrouppostfinalize)
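
Draining _finalizecallback in a loop (instead of the old single pass) lets a
finalize callback register further finalizers, paired with the new
hasfinalize() to avoid duplicates; late additions still run before the
transaction closes. A toy model of the behaviour, not the real transaction
class:

    class MiniTxn(object):
        def __init__(self):
            self._finalizecallback = {}

        def hasfinalize(self, category):
            return category in self._finalizecallback

        def addfinalize(self, category, callback):
            self._finalizecallback[category] = callback

        def close(self):
            # same drain pattern as the hunk above: callbacks registered
            # during finalization are picked up on the next pass
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                for cat in sorted(callbacks):
                    callbacks[cat](self)

    txn = MiniTxn()
    txn.addfinalize(
        b'0-first',
        lambda t: t.addfinalize(b'1-late', lambda t: print('late ran')),
    )
    txn.close()  # a single-pass implementation would drop b'1-late'
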
--- a/mercurial/ui.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/ui.py	Tue Jan 21 13:14:51 2020 -0500
@@ -45,6 +45,7 @@
 from .utils import (
     dateutil,
     procutil,
+    resourceutil,
     stringutil,
 )
 
@@ -307,6 +308,8 @@
         for t, f in rcutil.rccomponents():
             if t == b'path':
                 u.readconfig(f, trust=True)
+            elif t == b'resource':
+                u.read_resource_config(f, trust=True)
             elif t == b'items':
                 sections = set()
                 for section, name, value, source in f:
@@ -424,27 +427,65 @@
             )
         return False
 
+    def read_resource_config(
+        self, name, root=None, trust=False, sections=None, remap=None
+    ):
+        try:
+            fp = resourceutil.open_resource(name[0], name[1])
+        except IOError:
+            if not sections:  # ignore unless we were looking for something
+                return
+            raise
+
+        self._readconfig(
+            b'resource:%s.%s' % name, fp, root, trust, sections, remap
+        )
+
     def readconfig(
         self, filename, root=None, trust=False, sections=None, remap=None
     ):
         try:
-            fp = open(filename, r'rb')
+            fp = open(filename, 'rb')
         except IOError:
             if not sections:  # ignore unless we were looking for something
                 return
             raise
 
-        cfg = config.config()
-        trusted = sections or trust or self._trusted(fp, filename)
+        self._readconfig(filename, fp, root, trust, sections, remap)
+
+    def _readconfig(
+        self, filename, fp, root=None, trust=False, sections=None, remap=None
+    ):
+        with fp:
+            cfg = config.config()
+            trusted = sections or trust or self._trusted(fp, filename)
+
+            try:
+                cfg.read(filename, fp, sections=sections, remap=remap)
+            except error.ParseError as inst:
+                if trusted:
+                    raise
+                self.warn(_(b'ignored: %s\n') % stringutil.forcebytestr(inst))
+
+        self._applyconfig(cfg, trusted, root)
 
-        try:
-            cfg.read(filename, fp, sections=sections, remap=remap)
-            fp.close()
-        except error.ConfigError as inst:
-            if trusted:
-                raise
-            self.warn(_(b"ignored: %s\n") % stringutil.forcebytestr(inst))
+    def applyconfig(self, configitems, source=b"", root=None):
+        """Add configitems from a non-file source.  Unlike with ``setconfig()``,
+        they can be overridden by subsequent config file reads.  The items are
+        in the same format as ``configoverride()``, namely a dict of the
+        following structure: {(section, name) : value}
 
+        Typically this is used by extensions that inject themselves into the
+        config file load procedure by monkeypatching ``localrepo.loadhgrc()``.
+        """
+        cfg = config.config()
+
+        for (section, name), value in configitems.items():
+            cfg.set(section, name, value, source)
+
+        self._applyconfig(cfg, True, root)
+
+    def _applyconfig(self, cfg, trusted, root):
         if self.plain():
             for k in (
                 b'debug',
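
Per the applyconfig() docstring above, the items use the configoverride()
format, {(section, name): value}, and unlike setconfig() the values stay
overridable by later config file reads. A plausible, purely hypothetical
extension snippet along the lines the docstring suggests:

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, *args, **kwargs):
        # made-up section/knob, for illustration only
        ui.applyconfig(
            {(b'experimental', b'some-knob'): b'yes'}, source=b'myext'
        )
        return orig(ui, *args, **kwargs)

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
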
@@ -653,7 +694,8 @@
         return main, sub
 
     def configpath(self, section, name, default=_unset, untrusted=False):
-        b'get a path config item, expanded relative to repo root or config file'
+        """get a path config item, expanded relative to repo root or config
+        file"""
         v = self.config(section, name, default, untrusted)
         if v is None:
             return None
@@ -1087,7 +1129,7 @@
 
         # inlined _write() for speed
         if self._buffers:
-            label = opts.get(r'label', b'')
+            label = opts.get('label', b'')
             if label and self._bufferapplylabels:
                 self._buffers[-1].extend(self.label(a, label) for a in args)
             else:
@@ -1095,7 +1137,7 @@
             return
 
         # inlined _writenobuf() for speed
-        if not opts.get(r'keepprogressbar', False):
+        if not opts.get('keepprogressbar', False):
             self._progclear()
         msg = b''.join(args)
 
@@ -1108,7 +1150,7 @@
                 color.win32print(self, dest.write, msg, **opts)
             else:
                 if self._colormode is not None:
-                    label = opts.get(r'label', b'')
+                    label = opts.get('label', b'')
                     msg = self.label(msg, label)
                 dest.write(msg)
         except IOError as err:
@@ -1124,7 +1166,7 @@
     def _write(self, dest, *args, **opts):
         # update write() as well if you touch this code
         if self._isbuffered(dest):
-            label = opts.get(r'label', b'')
+            label = opts.get('label', b'')
             if label and self._bufferapplylabels:
                 self._buffers[-1].extend(self.label(a, label) for a in args)
             else:
@@ -1134,7 +1176,7 @@
 
     def _writenobuf(self, dest, *args, **opts):
         # update write() as well if you touch this code
-        if not opts.get(r'keepprogressbar', False):
+        if not opts.get('keepprogressbar', False):
             self._progclear()
         msg = b''.join(args)
 
@@ -1153,7 +1195,7 @@
                 color.win32print(self, dest.write, msg, **opts)
             else:
                 if self._colormode is not None:
-                    label = opts.get(r'label', b'')
+                    label = opts.get('label', b'')
                     msg = self.label(msg, label)
                 dest.write(msg)
             # stderr may be buffered under win32 when redirected to files,
@@ -1588,7 +1630,7 @@
         return self._prompt(msg, default=default)
 
     def _prompt(self, msg, **opts):
-        default = opts[r'default']
+        default = opts['default']
         if not self.interactive():
             self._writemsg(self._fmsgout, msg, b' ', type=b'prompt', **opts)
             self._writemsg(
@@ -1625,7 +1667,7 @@
         # prompt to start parsing. Sadly, we also can't rely on
         # choices containing spaces, ASCII, or basically anything
         # except an ampersand followed by a character.
-        m = re.match(br'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
+        m = re.match(br'(?s)(.+?)\$\$([^$]*&[^ $].*)', prompt)
         msg = m.group(1)
         choices = [p.strip(b' ') for p in m.group(2).split(b'$$')]
 
@@ -1674,7 +1716,7 @@
                         raise EOFError
                     return l.rstrip(b'\n')
                 else:
-                    return getpass.getpass(r'')
+                    return getpass.getpass('')
         except EOFError:
             raise error.ResponseExpected()
 
@@ -1765,9 +1807,8 @@
             prefix=b'hg-' + extra[b'prefix'] + b'-', suffix=suffix, dir=rdir
         )
         try:
-            f = os.fdopen(fd, r'wb')
-            f.write(util.tonativeeol(text))
-            f.close()
+            with os.fdopen(fd, 'wb') as f:
+                f.write(util.tonativeeol(text))
 
             environ = {b'HGUSER': user}
             if b'transplant_source' in extra:
@@ -1793,9 +1834,8 @@
                 blockedtag=b'editor',
             )
 
-            f = open(name, r'rb')
-            t = util.fromnativeeol(f.read())
-            f.close()
+            with open(name, 'rb') as f:
+                t = util.fromnativeeol(f.read())
         finally:
             os.unlink(name)
 
@@ -1858,13 +1898,13 @@
                 # exclude frame where 'exc' was chained and rethrown from exctb
                 self.write_err(
                     b'Traceback (most recent call last):\n',
-                    b''.join(exctb[:-1]),
-                    b''.join(causetb),
-                    b''.join(exconly),
+                    encoding.strtolocal(''.join(exctb[:-1])),
+                    encoding.strtolocal(''.join(causetb)),
+                    encoding.strtolocal(''.join(exconly)),
                 )
             else:
                 output = traceback.format_exception(exc[0], exc[1], exc[2])
-                self.write_err(encoding.strtolocal(r''.join(output)))
+                self.write_err(encoding.strtolocal(''.join(output)))
         return self.tracebackflag or force
 
     def geteditor(self):
@@ -2033,7 +2073,10 @@
             self.log(
                 b'develwarn', b'%s at: %s:%d (%s)\n', msg, fname, lineno, fmsg
             )
-            curframe = calframe = None  # avoid cycles
+
+            # avoid cycles
+            del curframe
+            del calframe
 
     def deprecwarn(self, msg, version, stacklevel=2):
         """issue a deprecation warning
@@ -2305,6 +2348,6 @@
     isn't a structured channel, so that the message will be colorized.
     """
     # TODO: maybe change 'type' to a mandatory option
-    if r'type' in opts and not getattr(dest, 'structured', False):
-        opts[r'label'] = opts.get(r'label', b'') + b' ui.%s' % opts.pop(r'type')
+    if 'type' in opts and not getattr(dest, 'structured', False):
+        opts['label'] = opts.get('label', b'') + b' ui.%s' % opts.pop('type')
     write(dest, *args, **opts)
--- a/mercurial/unionrepo.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/unionrepo.py	Tue Jan 21 13:14:51 2020 -0500
@@ -62,9 +62,10 @@
             if linkmapper is not None:  # link is to same revlog
                 base = linkmapper(base)
 
-            if node in self.nodemap:
+            this_rev = self.index.get_rev(node)
+            if this_rev is not None:
                 # this happens for the common revlog revisions
-                self.bundlerevs.add(self.nodemap[node])
+                self.bundlerevs.add(this_rev)
                 continue
 
             p1node = self.revlog2.node(p1rev)
@@ -83,7 +84,6 @@
                 node,
             )
             self.index.append(e)
-            self.nodemap[node] = n
             self.bundlerevs.add(n)
             n += 1
 
--- a/mercurial/upgrade.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/upgrade.py	Tue Jan 21 13:14:51 2020 -0500
@@ -1242,25 +1242,31 @@
             ui.warn(msg % b', '.join(sorted(incompatible)))
             revlogs = UPGRADE_ALL_REVLOGS
 
+    def write_labeled(l, label):
+        first = True
+        for r in sorted(l):
+            if not first:
+                ui.write(b', ')
+            ui.write(r, label=label)
+            first = False
+
     def printrequirements():
         ui.write(_(b'requirements\n'))
-        ui.write(
-            _(b'   preserved: %s\n')
-            % _(b', ').join(sorted(newreqs & repo.requirements))
+        ui.write(_(b'   preserved: '))
+        write_labeled(
+            newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
         )
-
+        ui.write((b'\n'))
+        removed = repo.requirements - newreqs
         if repo.requirements - newreqs:
-            ui.write(
-                _(b'   removed: %s\n')
-                % _(b', ').join(sorted(repo.requirements - newreqs))
-            )
-
-        if newreqs - repo.requirements:
-            ui.write(
-                _(b'   added: %s\n')
-                % _(b', ').join(sorted(newreqs - repo.requirements))
-            )
-
+            ui.write(_(b'   removed: '))
+            write_labeled(removed, "upgrade-repo.requirement.removed")
+            ui.write((b'\n'))
+        added = newreqs - repo.requirements
+        if added:
+            ui.write(_(b'   added: '))
+            write_labeled(added, "upgrade-repo.requirement.added")
+            ui.write((b'\n'))
         ui.write(b'\n')
 
     def printupgradeactions():
--- a/mercurial/url.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/url.py	Tue Jan 21 13:14:51 2020 -0500
@@ -147,7 +147,7 @@
             # Keys and values need to be str because the standard library
             # expects them to be.
             proxyurl = str(proxy)
-            proxies = {r'http': proxyurl, r'https': proxyurl}
+            proxies = {'http': proxyurl, 'https': proxyurl}
             ui.debug(b'proxying through %s\n' % util.hidepassword(bytes(proxy)))
         else:
             proxies = {}
@@ -204,8 +204,8 @@
 def _generic_start_transaction(handler, h, req):
     tunnel_host = req._tunnel_host
     if tunnel_host:
-        if tunnel_host[:7] not in [r'http://', r'https:/']:
-            tunnel_host = r'https://' + tunnel_host
+        if tunnel_host[:7] not in ['http://', 'https:/']:
+            tunnel_host = 'https://' + tunnel_host
         new_tunnel = True
     else:
         tunnel_host = urllibcompat.getselector(req)
@@ -228,7 +228,7 @@
         [
             (x, self.headers[x])
             for x in self.headers
-            if x.lower().startswith(r'proxy-')
+            if x.lower().startswith('proxy-')
         ]
     )
     self.send(b'CONNECT %s HTTP/1.0\r\n' % self.realhostport)
@@ -522,7 +522,7 @@
         )
         if pw is not None:
             raw = b"%s:%s" % (pycompat.bytesurl(user), pycompat.bytesurl(pw))
-            auth = r'Basic %s' % pycompat.strurl(base64.b64encode(raw).strip())
+            auth = 'Basic %s' % pycompat.strurl(base64.b64encode(raw).strip())
             if req.get_header(self.auth_header, None) == auth:
                 return None
             self.auth = auth
@@ -655,16 +655,16 @@
     # do look at this value.
     if not useragent:
         agent = b'mercurial/proto-1.0 (Mercurial %s)' % util.version()
-        opener.addheaders = [(r'User-agent', pycompat.sysstr(agent))]
+        opener.addheaders = [('User-agent', pycompat.sysstr(agent))]
     else:
-        opener.addheaders = [(r'User-agent', pycompat.sysstr(useragent))]
+        opener.addheaders = [('User-agent', pycompat.sysstr(useragent))]
 
     # This header should only be needed by wire protocol requests. But it has
     # been sent on all requests since forever. We keep sending it for backwards
     # compatibility reasons. Modern versions of the wire protocol use
     # X-HgProto-<N> for advertising client support.
     if sendaccept:
-        opener.addheaders.append((r'Accept', r'application/mercurial-0.1'))
+        opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
 
     return opener
 
--- a/mercurial/urllibcompat.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/urllibcompat.py	Tue Jan 21 13:14:51 2020 -0500
@@ -20,7 +20,7 @@
         """Add items that will be populated at the first access"""
         items = map(_sysstr, items)
         self._aliases.update(
-            (item.replace(r'_', r'').lower(), (origin, item)) for item in items
+            (item.replace('_', '').lower(), (origin, item)) for item in items
         )
 
     def _registeralias(self, origin, attr, name):
@@ -102,7 +102,7 @@
     # urllib.parse.quote() accepts both str and bytes, decodes bytes
     # (if necessary), and returns str. This is wonky. We provide a custom
     # implementation that only accepts bytes and emits bytes.
-    def quote(s, safe=r'/'):
+    def quote(s, safe='/'):
         # bytestr has an __iter__ that emits characters. quote_from_bytes()
         # does an iteration and expects ints. We coerce to bytes to appease it.
         if isinstance(s, pycompat.bytestr):
--- a/mercurial/util.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/util.py	Tue Jan 21 13:14:51 2020 -0500
@@ -53,15 +53,13 @@
 )
 from .utils import (
     compression,
+    hashutil,
     procutil,
     stringutil,
 )
 
-rustdirs = policy.importrust(r'dirstate', r'Dirs')
-
-base85 = policy.importmod(r'base85')
-osutil = policy.importmod(r'osutil')
-parsers = policy.importmod(r'parsers')
+base85 = policy.importmod('base85')
+osutil = policy.importmod('osutil')
 
 b85decode = base85.b85decode
 b85encode = base85.b85encode
@@ -165,23 +163,23 @@
     # However, module name set through PYTHONWARNINGS was exactly matched, so
     # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
     # makes the whole PYTHONWARNINGS thing useless for our usecase.
-    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
-    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
-    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
+    warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
+    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
+    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
 if _dowarn and pycompat.ispy3:
     # silence warning emitted by passing user string to re.sub()
     warnings.filterwarnings(
-        r'ignore', r'bad escape', DeprecationWarning, r'mercurial'
+        'ignore', 'bad escape', DeprecationWarning, 'mercurial'
     )
     warnings.filterwarnings(
-        r'ignore', r'invalid escape sequence', DeprecationWarning, r'mercurial'
+        'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
     )
     # TODO: reinvent imp.is_frozen()
     warnings.filterwarnings(
-        r'ignore',
-        r'the imp module is deprecated',
+        'ignore',
+        'the imp module is deprecated',
         DeprecationWarning,
-        r'mercurial',
+        'mercurial',
     )
 
 
@@ -200,7 +198,7 @@
 
 DIGESTS = {
     b'md5': hashlib.md5,
-    b'sha1': hashlib.sha1,
+    b'sha1': hashutil.sha1,
     b'sha512': hashlib.sha512,
 }
 # List of digest types from strongest to weakest
@@ -418,10 +416,16 @@
         return data
 
 
-def mmapread(fp):
+def mmapread(fp, size=None):
+    if size == 0:
+        # size of 0 to mmap.mmap() means "all data"
+        # rather than "zero bytes", so special case that.
+        return b''
+    elif size is None:
+        size = 0
     try:
         fd = getattr(fp, 'fileno', lambda: fp)()
-        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
+        return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
     except ValueError:
         # Empty files cannot be mmapped, but mmapread should still work.  Check
         # if the file is empty, and if so, return an empty buffer.
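
The new size parameter narrows the mapping, and the size == 0 special case
exists because mmap.mmap(fd, 0) means "map the whole file" rather than "map
zero bytes". A minimal usage sketch (the path is made up, and the file is
assumed to hold at least 1024 bytes, since mapping past EOF raises
ValueError on most platforms):

    from mercurial import util

    path = 'example.dat'  # hypothetical file
    with open(path, 'rb') as fp:
        buf = util.mmapread(fp, size=1024)  # map only the first 1 KiB
        header = buf[:16]                   # mmap slices yield bytes
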
@@ -438,42 +442,42 @@
     """
 
     __slots__ = (
-        r'_orig',
-        r'_observer',
+        '_orig',
+        '_observer',
     )
 
     def __init__(self, fh, observer):
-        object.__setattr__(self, r'_orig', fh)
-        object.__setattr__(self, r'_observer', observer)
+        object.__setattr__(self, '_orig', fh)
+        object.__setattr__(self, '_observer', observer)
 
     def __getattribute__(self, name):
         ours = {
-            r'_observer',
+            '_observer',
             # IOBase
-            r'close',
+            'close',
             # closed if a property
-            r'fileno',
-            r'flush',
-            r'isatty',
-            r'readable',
-            r'readline',
-            r'readlines',
-            r'seek',
-            r'seekable',
-            r'tell',
-            r'truncate',
-            r'writable',
-            r'writelines',
+            'fileno',
+            'flush',
+            'isatty',
+            'readable',
+            'readline',
+            'readlines',
+            'seek',
+            'seekable',
+            'tell',
+            'truncate',
+            'writable',
+            'writelines',
             # RawIOBase
-            r'read',
-            r'readall',
-            r'readinto',
-            r'write',
+            'read',
+            'readall',
+            'readinto',
+            'write',
             # BufferedIOBase
             # raw is a property
-            r'detach',
+            'detach',
             # read defined above
-            r'read1',
+            'read1',
             # readinto defined above
             # write defined above
         }
@@ -482,30 +486,30 @@
         if name in ours:
             return object.__getattribute__(self, name)
 
-        return getattr(object.__getattribute__(self, r'_orig'), name)
+        return getattr(object.__getattribute__(self, '_orig'), name)
 
     def __nonzero__(self):
-        return bool(object.__getattribute__(self, r'_orig'))
+        return bool(object.__getattribute__(self, '_orig'))
 
     __bool__ = __nonzero__
 
     def __delattr__(self, name):
-        return delattr(object.__getattribute__(self, r'_orig'), name)
+        return delattr(object.__getattribute__(self, '_orig'), name)
 
     def __setattr__(self, name, value):
-        return setattr(object.__getattribute__(self, r'_orig'), name, value)
+        return setattr(object.__getattribute__(self, '_orig'), name, value)
 
     def __iter__(self):
-        return object.__getattribute__(self, r'_orig').__iter__()
+        return object.__getattribute__(self, '_orig').__iter__()
 
     def _observedcall(self, name, *args, **kwargs):
         # Call the original object.
-        orig = object.__getattribute__(self, r'_orig')
+        orig = object.__getattribute__(self, '_orig')
         res = getattr(orig, name)(*args, **kwargs)
 
         # Call a method on the observer of the same name with arguments
         # so it can react, log, etc.
-        observer = object.__getattribute__(self, r'_observer')
+        observer = object.__getattribute__(self, '_observer')
         fn = getattr(observer, name, None)
         if fn:
             fn(res, *args, **kwargs)
@@ -513,98 +517,98 @@
         return res
 
     def close(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'close', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'close', *args, **kwargs
         )
 
     def fileno(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'fileno', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'fileno', *args, **kwargs
         )
 
     def flush(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'flush', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'flush', *args, **kwargs
         )
 
     def isatty(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'isatty', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'isatty', *args, **kwargs
         )
 
     def readable(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'readable', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'readable', *args, **kwargs
         )
 
     def readline(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'readline', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'readline', *args, **kwargs
         )
 
     def readlines(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'readlines', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'readlines', *args, **kwargs
         )
 
     def seek(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'seek', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'seek', *args, **kwargs
         )
 
     def seekable(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'seekable', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'seekable', *args, **kwargs
         )
 
     def tell(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'tell', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'tell', *args, **kwargs
         )
 
     def truncate(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'truncate', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'truncate', *args, **kwargs
         )
 
     def writable(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'writable', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'writable', *args, **kwargs
         )
 
     def writelines(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'writelines', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'writelines', *args, **kwargs
         )
 
     def read(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'read', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'read', *args, **kwargs
         )
 
     def readall(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'readall', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'readall', *args, **kwargs
         )
 
     def readinto(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'readinto', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'readinto', *args, **kwargs
         )
 
     def write(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'write', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'write', *args, **kwargs
         )
 
     def detach(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'detach', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'detach', *args, **kwargs
         )
 
     def read1(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'read1', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'read1', *args, **kwargs
         )
 
 
@@ -651,18 +655,18 @@
 
 
 PROXIED_SOCKET_METHODS = {
-    r'makefile',
-    r'recv',
-    r'recvfrom',
-    r'recvfrom_into',
-    r'recv_into',
-    r'send',
-    r'sendall',
-    r'sendto',
-    r'setblocking',
-    r'settimeout',
-    r'gettimeout',
-    r'setsockopt',
+    'makefile',
+    'recv',
+    'recvfrom',
+    'recvfrom_into',
+    'recv_into',
+    'send',
+    'sendall',
+    'sendto',
+    'setblocking',
+    'settimeout',
+    'gettimeout',
+    'setsockopt',
 }
 
 
@@ -676,39 +680,39 @@
     """
 
     __slots__ = (
-        r'_orig',
-        r'_observer',
+        '_orig',
+        '_observer',
     )
 
     def __init__(self, sock, observer):
-        object.__setattr__(self, r'_orig', sock)
-        object.__setattr__(self, r'_observer', observer)
+        object.__setattr__(self, '_orig', sock)
+        object.__setattr__(self, '_observer', observer)
 
     def __getattribute__(self, name):
         if name in PROXIED_SOCKET_METHODS:
             return object.__getattribute__(self, name)
 
-        return getattr(object.__getattribute__(self, r'_orig'), name)
+        return getattr(object.__getattribute__(self, '_orig'), name)
 
     def __delattr__(self, name):
-        return delattr(object.__getattribute__(self, r'_orig'), name)
+        return delattr(object.__getattribute__(self, '_orig'), name)
 
     def __setattr__(self, name, value):
-        return setattr(object.__getattribute__(self, r'_orig'), name, value)
+        return setattr(object.__getattribute__(self, '_orig'), name, value)
 
     def __nonzero__(self):
-        return bool(object.__getattribute__(self, r'_orig'))
+        return bool(object.__getattribute__(self, '_orig'))
 
     __bool__ = __nonzero__
 
     def _observedcall(self, name, *args, **kwargs):
         # Call the original object.
-        orig = object.__getattribute__(self, r'_orig')
+        orig = object.__getattribute__(self, '_orig')
         res = getattr(orig, name)(*args, **kwargs)
 
         # Call a method on the observer of the same name with arguments
         # so it can react, log, etc.
-        observer = object.__getattribute__(self, r'_observer')
+        observer = object.__getattribute__(self, '_observer')
         fn = getattr(observer, name, None)
         if fn:
             fn(res, *args, **kwargs)
@@ -716,13 +720,13 @@
         return res
 
     def makefile(self, *args, **kwargs):
-        res = object.__getattribute__(self, r'_observedcall')(
-            r'makefile', *args, **kwargs
+        res = object.__getattribute__(self, '_observedcall')(
+            'makefile', *args, **kwargs
         )
 
         # The file object may be used for I/O. So we turn it into a
         # proxy using our observer.
-        observer = object.__getattribute__(self, r'_observer')
+        observer = object.__getattribute__(self, '_observer')
         return makeloggingfileobject(
             observer.fh,
             res,
@@ -734,62 +738,68 @@
         )
 
     def recv(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'recv', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'recv', *args, **kwargs
         )
 
     def recvfrom(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'recvfrom', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'recvfrom', *args, **kwargs
         )
 
     def recvfrom_into(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'recvfrom_into', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'recvfrom_into', *args, **kwargs
         )
 
     def recv_into(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'recv_info', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'recv_into', *args, **kwargs
         )
 
     def send(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'send', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'send', *args, **kwargs
         )
 
     def sendall(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'sendall', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'sendall', *args, **kwargs
         )
 
     def sendto(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'sendto', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'sendto', *args, **kwargs
         )
 
     def setblocking(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'setblocking', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'setblocking', *args, **kwargs
         )
 
     def settimeout(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'settimeout', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'settimeout', *args, **kwargs
         )
 
     def gettimeout(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'gettimeout', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'gettimeout', *args, **kwargs
         )
 
     def setsockopt(self, *args, **kwargs):
-        return object.__getattribute__(self, r'_observedcall')(
-            r'setsockopt', *args, **kwargs
+        return object.__getattribute__(self, '_observedcall')(
+            'setsockopt', *args, **kwargs
         )
 
 
 class baseproxyobserver(object):
+    def __init__(self, fh, name, logdata, logdataapis):
+        self.fh = fh
+        self.name = name
+        self.logdata = logdata
+        self.logdataapis = logdataapis
+
     def _writedata(self, data):
         if not self.logdata:
             if self.logdataapis:
@@ -826,10 +836,7 @@
     def __init__(
         self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
     ):
-        self.fh = fh
-        self.name = name
-        self.logdata = logdata
-        self.logdataapis = logdataapis
+        super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
         self.reads = reads
         self.writes = writes
 
@@ -952,13 +959,10 @@
         logdata=False,
         logdataapis=True,
     ):
-        self.fh = fh
-        self.name = name
+        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
         self.reads = reads
         self.writes = writes
         self.states = states
-        self.logdata = logdata
-        self.logdataapis = logdataapis
 
     def makefile(self, res, mode=None, bufsize=None):
         if not self.states:
@@ -1168,7 +1172,7 @@
     """
     if not v:
         v = version()
-    m = remod.match(br'(\d+(?:\.\d+){,2})[\+-]?(.*)', v)
+    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
     if not m:
         vparts, extra = b'', v
     elif m.group(2):
@@ -1176,6 +1180,8 @@
     else:
         vparts, extra = m.group(1), None
 
+    assert vparts is not None  # help pytype
+
     vints = []
     for i in vparts.split(b'.'):
         try:
@@ -1198,12 +1204,12 @@
     '''cache the result of function calls'''
     # XXX doesn't handle keywords args
     if func.__code__.co_argcount == 0:
-        cache = []
+        listcache = []
 
         def f():
-            if len(cache) == 0:
-                cache.append(func())
-            return cache[0]
+            if len(listcache) == 0:
+                listcache.append(func())
+            return listcache[0]
 
         return f
     cache = {}
@@ -1254,6 +1260,9 @@
     >>> d2.update([(b'a', 2)])
     >>> list(d2.keys()) # should still be in last-set order
     ['b', 'a']
+    >>> d1.insert(1, b'a.5', 0.5)
+    >>> d1
+    sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
     '''
 
     def __setitem__(self, key, value):
@@ -1269,6 +1278,14 @@
             for k, v in src:
                 self[k] = v
 
+    def insert(self, position, key, value):
+        for (i, (k, v)) in enumerate(list(self.items())):
+            if i == position:
+                self[key] = value
+            if i >= position:
+                del self[k]
+                self[k] = v
+
 
 class cowdict(cow, dict):
     """copy-on-write dict
@@ -1304,7 +1321,7 @@
     """
 
 
-class transactional(object):
+class transactional(object):  # pytype: disable=ignored-metaclass
     """Base class for making a transactional type into a context manager."""
 
     __metaclass__ = abc.ABCMeta
@@ -1362,7 +1379,7 @@
     pair for the dictionary entry.
     """
 
-    __slots__ = (r'next', r'prev', r'key', r'value', r'cost')
+    __slots__ = ('next', 'prev', 'key', 'value', 'cost')
 
     def __init__(self):
         self.next = None
@@ -1483,6 +1500,8 @@
             if default is _notset:
                 raise
             return default
+
+        assert node is not None  # help pytype
         value = node.value
         self.totalcost -= node.cost
         node.markempty()
@@ -1570,6 +1589,8 @@
         while n.key is _notset:
             n = n.prev
 
+        assert n is not None  # help pytype
+
         key, value = n.key, n.value
 
         # And remove it from the cache and mark it as empty.
@@ -1825,16 +1846,6 @@
     return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
 
 
-# the location of data files matching the source code
-if procutil.mainfrozen() and getattr(sys, 'frozen', None) != b'macosx_app':
-    # executable version (py2exe) doesn't support __file__
-    datapath = os.path.dirname(pycompat.sysexecutable)
-else:
-    datapath = os.path.dirname(pycompat.fsencode(__file__))
-
-i18n.setdatapath(datapath)
-
-
 def checksignature(func):
     '''wrap a function with code to check for calling errors'''
 
@@ -2053,15 +2064,17 @@
             )
 
 
+timer = getattr(time, "perf_counter", None)
+
 if pycompat.iswindows:
     checkosfilename = checkwinfilename
-    timer = time.clock
+    if not timer:
+        timer = time.clock
 else:
-    checkosfilename = platform.checkosfilename
-    timer = time.time
-
-if safehasattr(time, "perf_counter"):
-    timer = time.perf_counter
+    # mercurial.windows doesn't have platform.checkosfilename
+    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
+    if not timer:
+        timer = time.time
 
 
 def makelock(info, pathname):
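
The timer fallback chain now resolves once at import: time.perf_counter()
when the Python version provides it, otherwise the old per-platform choices
(time.clock on Windows, time.time elsewhere). Typical measurement use, as a
small sketch:

    from mercurial import util

    start = util.timer()
    sum(range(10 ** 6))            # stand-in workload
    elapsed = util.timer() - start
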
@@ -2132,7 +2145,7 @@
 
 
 try:
-    import re2
+    import re2  # pytype: disable=import-error
 
     _re2 = None
 except ImportError:
@@ -3479,6 +3492,7 @@
     f=procutil.stderr,
     otherf=procutil.stdout,
     depth=0,
+    prefix=b'',
 ):
     '''Writes a message to f (stderr) with a nicely formatted stacktrace.
     Skips the 'skip' entries closest to the call, then show 'depth' entries.
@@ -3488,68 +3502,12 @@
     '''
     if otherf:
         otherf.flush()
-    f.write(b'%s at:\n' % msg.rstrip())
+    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
     for line in getstackframes(skip + 1, depth=depth):
-        f.write(line)
+        f.write(prefix + line)
     f.flush()
 
 
-class dirs(object):
-    '''a multiset of directory names from a dirstate or manifest'''
-
-    def __init__(self, map, skip=None):
-        self._dirs = {}
-        addpath = self.addpath
-        if isinstance(map, dict) and skip is not None:
-            for f, s in pycompat.iteritems(map):
-                if s[0] != skip:
-                    addpath(f)
-        elif skip is not None:
-            raise error.ProgrammingError(
-                b"skip character is only supported with a dict source"
-            )
-        else:
-            for f in map:
-                addpath(f)
-
-    def addpath(self, path):
-        dirs = self._dirs
-        for base in finddirs(path):
-            if base in dirs:
-                dirs[base] += 1
-                return
-            dirs[base] = 1
-
-    def delpath(self, path):
-        dirs = self._dirs
-        for base in finddirs(path):
-            if dirs[base] > 1:
-                dirs[base] -= 1
-                return
-            del dirs[base]
-
-    def __iter__(self):
-        return iter(self._dirs)
-
-    def __contains__(self, d):
-        return d in self._dirs
-
-
-if safehasattr(parsers, 'dirs'):
-    dirs = parsers.dirs
-
-if rustdirs is not None:
-    dirs = rustdirs
-
-
-def finddirs(path):
-    pos = path.rfind(b'/')
-    while pos != -1:
-        yield path[:pos]
-        pos = path.rfind(b'/', 0, pos)
-    yield b''
-
-
 # convenient shortcut
 dst = debugstacktrace
 
--- a/mercurial/utils/cborutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/utils/cborutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -46,20 +46,20 @@
 
 # Indefinite types begin with their major type ORd with information value 31.
 BEGIN_INDEFINITE_BYTESTRING = struct.pack(
-    r'>B', MAJOR_TYPE_BYTESTRING << 5 | SUBTYPE_INDEFINITE
+    '>B', MAJOR_TYPE_BYTESTRING << 5 | SUBTYPE_INDEFINITE
 )
 BEGIN_INDEFINITE_ARRAY = struct.pack(
-    r'>B', MAJOR_TYPE_ARRAY << 5 | SUBTYPE_INDEFINITE
+    '>B', MAJOR_TYPE_ARRAY << 5 | SUBTYPE_INDEFINITE
 )
 BEGIN_INDEFINITE_MAP = struct.pack(
-    r'>B', MAJOR_TYPE_MAP << 5 | SUBTYPE_INDEFINITE
+    '>B', MAJOR_TYPE_MAP << 5 | SUBTYPE_INDEFINITE
 )
 
-ENCODED_LENGTH_1 = struct.Struct(r'>B')
-ENCODED_LENGTH_2 = struct.Struct(r'>BB')
-ENCODED_LENGTH_3 = struct.Struct(r'>BH')
-ENCODED_LENGTH_4 = struct.Struct(r'>BL')
-ENCODED_LENGTH_5 = struct.Struct(r'>BQ')
+ENCODED_LENGTH_1 = struct.Struct('>B')
+ENCODED_LENGTH_2 = struct.Struct('>BB')
+ENCODED_LENGTH_3 = struct.Struct('>BH')
+ENCODED_LENGTH_4 = struct.Struct('>BL')
+ENCODED_LENGTH_5 = struct.Struct('>BQ')
 
 # The break ends an indefinite length item.
 BREAK = b'\xff'
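
As the comment above says, an indefinite-length header byte is just the major
type in the top three bits with the low five bits set to 31; deriving the
bytestring case by hand:

    import struct

    MAJOR_TYPE_BYTESTRING = 2  # major type 2 = byte string (RFC 7049)
    SUBTYPE_INDEFINITE = 31

    header = struct.pack('>B', MAJOR_TYPE_BYTESTRING << 5 | SUBTYPE_INDEFINITE)
    assert header == b'\x5f'  # 0b010_11111
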
@@ -262,7 +262,7 @@
         return ord(b[i])
 
 
-STRUCT_BIG_UBYTE = struct.Struct(r'>B')
+STRUCT_BIG_UBYTE = struct.Struct('>B')
 STRUCT_BIG_USHORT = struct.Struct(b'>H')
 STRUCT_BIG_ULONG = struct.Struct(b'>L')
 STRUCT_BIG_ULONGLONG = struct.Struct(b'>Q')
--- a/mercurial/utils/compression.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/utils/compression.py	Tue Jan 21 13:14:51 2020 -0500
@@ -29,8 +29,7 @@
 CLIENTROLE = b'client'
 
 compewireprotosupport = collections.namedtuple(
-    r'compenginewireprotosupport',
-    (r'name', r'serverpriority', r'clientpriority'),
+    'compenginewireprotosupport', ('name', 'serverpriority', 'clientpriority'),
 )
 
 
@@ -646,7 +645,7 @@
         # Not all installs have the zstd module available. So defer importing
         # until first access.
         try:
-            from .. import zstd
+            from .. import zstd  # pytype: disable=import-error
 
             # Force delayed import.
             zstd.__version__
--- a/mercurial/utils/dateutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/utils/dateutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -209,6 +209,8 @@
     True
     >>> tz == strtz
     True
+    >>> parsedate(b'2000 UTC', formats=extendeddateformats)
+    (946684800, 0)
     """
     if bias is None:
         bias = {}
@@ -223,7 +225,7 @@
     if date == b'now' or date == _(b'now'):
         return makedate()
     if date == b'today' or date == _(b'today'):
-        date = datetime.date.today().strftime(r'%b %d')
+        date = datetime.date.today().strftime('%b %d')
         date = encoding.strtolocal(date)
     elif date == b'yesterday' or date == _(b'yesterday'):
         date = (datetime.date.today() - datetime.timedelta(days=1)).strftime(
@@ -244,7 +246,8 @@
                 if part[0:1] in b"HMS":
                     b = b"00"
                 else:
-                    b = b"0"
+                    # year, month, and day start from 1
+                    b = b"1"
 
             # this piece is for matching the generic end to today's date
             n = datestr(now, b"%" + part[0:1])
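
With the missing-piece default of b"1" for year/month/day, a bare year is
anchored to January 1st instead of an invalid month/day of zero, which is
what the added doctest pins down; equivalently:

    from mercurial.utils import dateutil

    ts, tz = dateutil.parsedate(
        b'2000 UTC', formats=dateutil.extendeddateformats
    )
    assert (ts, tz) == (946684800, 0)  # 2000-01-01 00:00:00 UTC
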
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/utils/hashutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+
+import hashlib
+
+try:
+    from ..thirdparty import sha1dc
+
+    sha1 = sha1dc.sha1
+except (ImportError, AttributeError):
+    sha1 = hashlib.sha1
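
hashutil gives call sites a single sha1 name that prefers the
collision-detecting sha1dc implementation and quietly falls back to hashlib
when that extension is unavailable; either way the interface is the familiar
hashlib one:

    from mercurial.utils import hashutil

    h = hashutil.sha1(b'node data')
    h.update(b'more data')
    print(h.hexdigest())  # with sha1dc, inputs exhibiting known SHA-1
                          # collision patterns are flagged rather than
                          # silently hashed
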
--- a/mercurial/utils/procutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/utils/procutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -11,7 +11,6 @@
 
 import contextlib
 import errno
-import imp
 import io
 import os
 import signal
@@ -32,7 +31,10 @@
     pycompat,
 )
 
-osutil = policy.importmod(r'osutil')
+# Import like this to keep import-checker happy
+from ..utils import resourceutil
+
+osutil = policy.importmod('osutil')
 
 stderr = pycompat.stderr
 stdin = pycompat.stdin
@@ -52,11 +54,11 @@
 if isatty(stdout):
     if pycompat.iswindows:
         # Windows doesn't support line buffering
-        stdout = os.fdopen(stdout.fileno(), r'wb', 0)
+        stdout = os.fdopen(stdout.fileno(), 'wb', 0)
     elif not pycompat.ispy3:
         # on Python 3, stdout (sys.stdout.buffer) is already line buffered and
         # buffering=1 is not handled in binary mode
-        stdout = os.fdopen(stdout.fileno(), r'wb', 1)
+        stdout = os.fdopen(stdout.fileno(), 'wb', 1)
 
 if pycompat.iswindows:
     from .. import windows as platform
@@ -211,7 +213,7 @@
     inname, outname = None, None
     try:
         infd, inname = pycompat.mkstemp(prefix=b'hg-filter-in-')
-        fp = os.fdopen(infd, r'wb')
+        fp = os.fdopen(infd, 'wb')
         fp.write(s)
         fp.close()
         outfd, outname = pycompat.mkstemp(prefix=b'hg-filter-out-')
@@ -247,26 +249,14 @@
 
 
 def filter(s, cmd):
-    b"filter a string through a command that transforms its input to its output"
+    """filter a string through a command that transforms its input to its
+    output"""
     for name, fn in pycompat.iteritems(_filtertable):
         if cmd.startswith(name):
             return fn(s, cmd[len(name) :].lstrip())
     return pipefilter(s, cmd)
 
 
-def mainfrozen():
-    """return True if we are a frozen executable.
-
-    The code supports py2exe (most common, Windows only) and tools/freeze
-    (portable, not much used).
-    """
-    return (
-        pycompat.safehasattr(sys, "frozen")
-        or pycompat.safehasattr(sys, "importers")  # new py2exe
-        or imp.is_frozen(r"__main__")  # old py2exe
-    )  # tools/freeze
-
-
 _hgexecutable = None
 
 
@@ -277,21 +267,18 @@
     """
     if _hgexecutable is None:
         hg = encoding.environ.get(b'HG')
-        mainmod = sys.modules[r'__main__']
+        mainmod = sys.modules['__main__']
         if hg:
             _sethgexecutable(hg)
-        elif mainfrozen():
-            if getattr(sys, 'frozen', None) == b'macosx_app':
+        elif resourceutil.mainfrozen():
+            if getattr(sys, 'frozen', None) == 'macosx_app':
                 # Env variable set by py2app
                 _sethgexecutable(encoding.environ[b'EXECUTABLEPATH'])
             else:
                 _sethgexecutable(pycompat.sysexecutable)
         elif (
             not pycompat.iswindows
-            and os.path.basename(
-                pycompat.fsencode(getattr(mainmod, '__file__', b''))
-            )
-            == b'hg'
+            and os.path.basename(getattr(mainmod, '__file__', '')) == 'hg'
         ):
             _sethgexecutable(pycompat.fsencode(mainmod.__file__))
         else:
@@ -340,11 +327,11 @@
         nullfd = os.open(os.devnull, os.O_RDONLY)
         os.dup2(nullfd, uin.fileno())
         os.close(nullfd)
-        fin = os.fdopen(newfd, r'rb')
+        fin = os.fdopen(newfd, 'rb')
     if _testfileno(uout, stdout):
         newfd = os.dup(uout.fileno())
         os.dup2(stderr.fileno(), uout.fileno())
-        fout = os.fdopen(newfd, r'wb')
+        fout = os.fdopen(newfd, 'wb')
     return fin, fout
 
 
@@ -361,7 +348,7 @@
     """return environ with optional override, useful for shelling out"""
 
     def py2shell(val):
-        b'convert python object into string that is useful to shell'
+        """convert python object into string that is useful to shell"""
         if val is None or val is False:
             return b'0'
         if val is True:
@@ -378,7 +365,9 @@
 if pycompat.iswindows:
 
     def shelltonative(cmd, env):
-        return platform.shelltocmdexe(cmd, shellenviron(env))
+        return platform.shelltocmdexe(  # pytype: disable=module-attr
+            cmd, shellenviron(env)
+        )
 
     tonativestr = encoding.strfromlocal
 else:
@@ -434,7 +423,10 @@
     return rc
 
 
-def gui():
+_is_gui = None
+
+
+def _gui():
     '''Are we running in a GUI?'''
     if pycompat.isdarwin:
         if b'SSH_CONNECTION' in encoding.environ:
@@ -450,6 +442,13 @@
         return pycompat.iswindows or encoding.environ.get(b"DISPLAY")
 
 
+def gui():
+    global _is_gui
+    if _is_gui is None:
+        _is_gui = _gui()
+    return _is_gui
+
+
 def hgcmd():
     """Return the command used to execute current hg
 
@@ -457,8 +456,8 @@
     to avoid things opening new shell windows like batch files, so we
     get either the python call or current executable.
     """
-    if mainfrozen():
-        if getattr(sys, 'frozen', None) == b'macosx_app':
+    if resourceutil.mainfrozen():
+        if getattr(sys, 'frozen', None) == 'macosx_app':
             # Env variable set by py2app
             return [encoding.environ[b'EXECUTABLEPATH']]
         else:
@@ -545,7 +544,10 @@
     # Following creation flags might create a console GUI window.
     # Using subprocess.CREATE_NEW_CONSOLE might help.
     # See https://phab.mercurial-scm.org/D1701 for discussion
-    _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
+    _creationflags = (
+        DETACHED_PROCESS
+        | subprocess.CREATE_NEW_PROCESS_GROUP  # pytype: disable=module-attr
+    )
 
     def runbgcommand(
         script,
@@ -591,6 +593,11 @@
         `Subprocess.wait` function for the spawned process.  This is mostly
         useful for developers who need to make sure the spawned process
         finished before a certain point (e.g. when writing tests).'''
+        if pycompat.isdarwin:
+            # avoid crash in CoreFoundation in case another thread
+            # calls gui() while we're calling fork().
+            gui()
+
         # double-fork to completely detach from the parent process
         # based on http://code.activestate.com/recipes/278731
         if record_wait is None:
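
The memoized ``gui()`` and the pre-``fork()`` warm-up above follow a
compute-once pattern; a standalone sketch of the same idea, using
hypothetical helper names rather than Mercurial API::

   import os

   _is_gui = None

   def _detect_gui():
       # stand-in for an expensive platform probe (e.g. a DISPLAY lookup,
       # or a CoreFoundation call that must first happen in the parent
       # process on macOS)
       return b'DISPLAY' in os.environb

   def gui():
       global _is_gui
       if _is_gui is None:
           _is_gui = _detect_gui()
       return _is_gui
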
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/utils/resourceutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,84 @@
+# resourceutil.py - utility for looking up resources
+#
+#  Copyright 2005 K. Thananchayan <thananck@yahoo.com>
+#  Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#  Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import imp
+import os
+import sys
+
+from .. import pycompat
+
+
+def mainfrozen():
+    """return True if we are a frozen executable.
+
+    The code supports py2exe (most common, Windows only) and tools/freeze
+    (portable, not much used).
+    """
+    return (
+        pycompat.safehasattr(sys, "frozen")
+        or pycompat.safehasattr(sys, "importers")  # new py2exe
+        or imp.is_frozen("__main__")  # old py2exe
+    )  # tools/freeze
+
+
+# the location of data files matching the source code
+if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
+    # executable version (py2exe) doesn't support __file__
+    datapath = os.path.dirname(pycompat.sysexecutable)
+else:
+    datapath = os.path.dirname(os.path.dirname(pycompat.fsencode(__file__)))
+    _rootpath = os.path.dirname(datapath)
+
+try:
+    from importlib import resources
+
+    from .. import encoding
+
+    # Force loading of the resources module
+    resources.open_binary  # pytype: disable=module-attr
+
+    def open_resource(package, name):
+        return resources.open_binary(  # pytype: disable=module-attr
+            pycompat.sysstr(package), pycompat.sysstr(name)
+        )
+
+    def is_resource(package, name):
+        return resources.is_resource(
+            pycompat.sysstr(package), encoding.strfromlocal(name)
+        )
+
+    def contents(package):
+        for r in resources.contents(pycompat.sysstr(package)):
+            yield encoding.strtolocal(r)
+
+
+except (ImportError, AttributeError):
+
+    def _package_path(package):
+        return os.path.join(_rootpath, *package.split(b'.'))
+
+    def open_resource(package, name):
+        path = os.path.join(_package_path(package), name)
+        return open(path, 'rb')
+
+    def is_resource(package, name):
+        path = os.path.join(_package_path(package), name)
+
+        try:
+            return os.path.isfile(pycompat.fsdecode(path))
+        except (IOError, OSError):
+            return False
+
+    def contents(package):
+        path = pycompat.fsdecode(_package_path(package))
+
+        for p in os.listdir(path):
+            yield pycompat.fsencode(p)
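
A hedged usage sketch of the API above (the package name is illustrative;
Mercurial passes byte strings throughout, as shown)::

   from mercurial.utils import resourceutil

   # enumerate bundled help texts, via importlib.resources when the
   # running Python provides it, or the filesystem fallback otherwise
   for name in resourceutil.contents(b'mercurial.helptext'):
       if resourceutil.is_resource(b'mercurial.helptext', name):
           with resourceutil.open_resource(b'mercurial.helptext', name) as fp:
               data = fp.read()
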
--- a/mercurial/utils/storageutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/utils/storageutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import hashlib
 import re
 import struct
 
@@ -24,8 +23,9 @@
     pycompat,
 )
 from ..interfaces import repository
+from ..utils import hashutil
 
-_nullhash = hashlib.sha1(nullid)
+_nullhash = hashutil.sha1(nullid)
 
 
 def hashrevisionsha1(text, p1, p2):
@@ -48,7 +48,7 @@
         else:
             a = p2
             b = p1
-        s = hashlib.sha1(a)
+        s = hashutil.sha1(a)
         s.update(b)
     s.update(text)
     return s.digest()
--- a/mercurial/utils/stringutil.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/utils/stringutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -593,7 +593,7 @@
     )
 
 
-_correctauthorformat = remod.compile(br'^[^<]+\s\<[^<>]+@[^<>]+\>$')
+_correctauthorformat = remod.compile(br'^[^<]+\s<[^<>]+@[^<>]+>$')
 
 
 def isauthorwellformed(author):
@@ -719,7 +719,7 @@
 
                 # First chunk on line is whitespace -- drop it, unless this
                 # is the very beginning of the text (i.e. no lines started yet).
-                if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
+                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                     del chunks[-1]
 
                 while chunks:
@@ -750,7 +750,7 @@
                 # Convert current line back to a string and store it in list
                 # of all lines (return value).
                 if cur_line:
-                    lines.append(indent + r''.join(cur_line))
+                    lines.append(indent + ''.join(cur_line))
 
             return lines
 
--- a/mercurial/verify.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/verify.py	Tue Jan 21 13:14:51 2020 -0500
@@ -54,7 +54,7 @@
         self.havecl = len(repo.changelog) > 0
         self.havemf = len(repo.manifestlog.getstorage(b'')) > 0
         self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
-        self.lrugetctx = util.lrucachefunc(repo.__getitem__)
+        self.lrugetctx = util.lrucachefunc(repo.unfiltered().__getitem__)
         self.refersmf = False
         self.fncachewarned = False
         # developer config: verify.skipflags
@@ -529,6 +529,8 @@
             else:
                 # Guard against implementations not setting this.
                 state[b'skipread'] = set()
+                state[b'safe_renamed'] = set()
+
                 for problem in fl.verifyintegrity(state):
                     if problem.node is not None:
                         linkrev = fl.linkrev(fl.rev(problem.node))
@@ -560,13 +562,14 @@
                     else:
                         del filenodes[f][n]
 
-                if n in state[b'skipread']:
+                if n in state[b'skipread'] and n not in state[b'safe_renamed']:
                     continue
 
                 # check renames
                 try:
-                    # This requires resolving fulltext (at least on revlogs). We
-                    # may want ``verifyintegrity()`` to pass a set of nodes with
+                    # This requires resolving fulltext (at least on revlogs,
+                    # though not with LFS revisions). We may want
+                    # ``verifyintegrity()`` to pass a set of nodes with
                     # rename metadata as an optimization.
                     rp = fl.renamed(n)
                     if rp:
--- a/mercurial/vfs.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/vfs.py	Tue Jan 21 13:14:51 2020 -0500
@@ -52,11 +52,17 @@
 
     def __init__(self, *args, **kwargs):
         '''Prevent instantiation; don't call this from subclasses.'''
-        raise NotImplementedError(b'attempted instantiating ' + str(type(self)))
+        raise NotImplementedError('attempted instantiating ' + str(type(self)))
+
+    def __call__(self, path, mode=b'rb', **kwargs):
+        raise NotImplementedError
 
     def _auditpath(self, path, mode):
         raise NotImplementedError
 
+    def join(self, path, *insidef):
+        raise NotImplementedError
+
     def tryread(self, path):
         '''gracefully return an empty string for missing files'''
         try:
@@ -301,7 +307,10 @@
         # Sharing backgroundfilecloser between threads is complex and using
         # multiple instances puts us at risk of running out of file descriptors,
         # so only allow using backgroundfilecloser in the main thread.
-        if not isinstance(threading.currentThread(), threading._MainThread):
+        if not isinstance(
+            threading.currentThread(),
+            threading._MainThread,  # pytype: disable=module-attr
+        ):
             yield
             return
         vfs = getattr(self, 'vfs', self)
@@ -312,10 +321,14 @@
 
         with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
             try:
-                vfs._backgroundfilecloser = bfc
+                vfs._backgroundfilecloser = (
+                    bfc  # pytype: disable=attribute-error
+                )
                 yield bfc
             finally:
-                vfs._backgroundfilecloser = None
+                vfs._backgroundfilecloser = (
+                    None  # pytype: disable=attribute-error
+                )
 
 
 class vfs(abstractvfs):
@@ -471,9 +484,12 @@
             fp = checkambigatclosing(fp)
 
         if backgroundclose and isinstance(
-            threading.currentThread(), threading._MainThread
+            threading.currentThread(),
+            threading._MainThread,  # pytype: disable=module-attr
         ):
-            if not self._backgroundfilecloser:
+            if (
+                not self._backgroundfilecloser  # pytype: disable=attribute-error
+            ):
                 raise error.Abort(
                     _(
                         b'backgroundclose can only be used when a '
@@ -481,7 +497,10 @@
                     )
                 )
 
-            fp = delayclosedfile(fp, self._backgroundfilecloser)
+            fp = delayclosedfile(
+                fp,
+                self._backgroundfilecloser,  # pytype: disable=attribute-error
+            )
 
         return fp
 
@@ -573,7 +592,7 @@
     """
 
     def __init__(self, fh):
-        object.__setattr__(self, r'_origfh', fh)
+        object.__setattr__(self, '_origfh', fh)
 
     def __getattr__(self, attr):
         return getattr(self._origfh, attr)
@@ -589,10 +608,10 @@
         return self
 
     def __exit__(self, exc_type, exc_value, exc_tb):
-        raise NotImplementedError(b'attempted instantiating ' + str(type(self)))
+        raise NotImplementedError('attempted instantiating ' + str(type(self)))
 
     def close(self):
-        raise NotImplementedError(b'attempted instantiating ' + str(type(self)))
+        raise NotImplementedError('attempted instantiating ' + str(type(self)))
 
 
 class delayclosedfile(closewrapbase):
@@ -603,7 +622,7 @@
 
     def __init__(self, fh, closer):
         super(delayclosedfile, self).__init__(fh)
-        object.__setattr__(self, r'_closer', closer)
+        object.__setattr__(self, '_closer', closer)
 
     def __exit__(self, exc_type, exc_value, exc_tb):
         self._closer.close(self._origfh)
@@ -649,7 +668,7 @@
         self._running = True
 
         for i in range(threadcount):
-            t = threading.Thread(target=self._worker, name=b'backgroundcloser')
+            t = threading.Thread(target=self._worker, name='backgroundcloser')
             self._threads.append(t)
             t.start()
 
@@ -717,7 +736,7 @@
 
     def __init__(self, fh):
         super(checkambigatclosing, self).__init__(fh)
-        object.__setattr__(self, r'_oldstat', util.filestat.frompath(fh.name))
+        object.__setattr__(self, '_oldstat', util.filestat.frompath(fh.name))
 
     def _checkambig(self):
         oldstat = self._oldstat
--- a/mercurial/win32.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/win32.py	Tue Jan 21 13:14:51 2020 -0500
@@ -57,21 +57,21 @@
 
 
 class _FILETIME(ctypes.Structure):
-    _fields_ = [(r'dwLowDateTime', _DWORD), (r'dwHighDateTime', _DWORD)]
+    _fields_ = [('dwLowDateTime', _DWORD), ('dwHighDateTime', _DWORD)]
 
 
 class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
     _fields_ = [
-        (r'dwFileAttributes', _DWORD),
-        (r'ftCreationTime', _FILETIME),
-        (r'ftLastAccessTime', _FILETIME),
-        (r'ftLastWriteTime', _FILETIME),
-        (r'dwVolumeSerialNumber', _DWORD),
-        (r'nFileSizeHigh', _DWORD),
-        (r'nFileSizeLow', _DWORD),
-        (r'nNumberOfLinks', _DWORD),
-        (r'nFileIndexHigh', _DWORD),
-        (r'nFileIndexLow', _DWORD),
+        ('dwFileAttributes', _DWORD),
+        ('ftCreationTime', _FILETIME),
+        ('ftLastAccessTime', _FILETIME),
+        ('ftLastWriteTime', _FILETIME),
+        ('dwVolumeSerialNumber', _DWORD),
+        ('nFileSizeHigh', _DWORD),
+        ('nFileSizeLow', _DWORD),
+        ('nNumberOfLinks', _DWORD),
+        ('nFileIndexHigh', _DWORD),
+        ('nFileIndexLow', _DWORD),
     ]
 
 
@@ -97,33 +97,33 @@
 
 class _STARTUPINFO(ctypes.Structure):
     _fields_ = [
-        (r'cb', _DWORD),
-        (r'lpReserved', _LPSTR),
-        (r'lpDesktop', _LPSTR),
-        (r'lpTitle', _LPSTR),
-        (r'dwX', _DWORD),
-        (r'dwY', _DWORD),
-        (r'dwXSize', _DWORD),
-        (r'dwYSize', _DWORD),
-        (r'dwXCountChars', _DWORD),
-        (r'dwYCountChars', _DWORD),
-        (r'dwFillAttribute', _DWORD),
-        (r'dwFlags', _DWORD),
-        (r'wShowWindow', _WORD),
-        (r'cbReserved2', _WORD),
-        (r'lpReserved2', ctypes.c_char_p),
-        (r'hStdInput', _HANDLE),
-        (r'hStdOutput', _HANDLE),
-        (r'hStdError', _HANDLE),
+        ('cb', _DWORD),
+        ('lpReserved', _LPSTR),
+        ('lpDesktop', _LPSTR),
+        ('lpTitle', _LPSTR),
+        ('dwX', _DWORD),
+        ('dwY', _DWORD),
+        ('dwXSize', _DWORD),
+        ('dwYSize', _DWORD),
+        ('dwXCountChars', _DWORD),
+        ('dwYCountChars', _DWORD),
+        ('dwFillAttribute', _DWORD),
+        ('dwFlags', _DWORD),
+        ('wShowWindow', _WORD),
+        ('cbReserved2', _WORD),
+        ('lpReserved2', ctypes.c_char_p),
+        ('hStdInput', _HANDLE),
+        ('hStdOutput', _HANDLE),
+        ('hStdError', _HANDLE),
     ]
 
 
 class _PROCESS_INFORMATION(ctypes.Structure):
     _fields_ = [
-        (r'hProcess', _HANDLE),
-        (r'hThread', _HANDLE),
-        (r'dwProcessId', _DWORD),
-        (r'dwThreadId', _DWORD),
+        ('hProcess', _HANDLE),
+        ('hThread', _HANDLE),
+        ('dwProcessId', _DWORD),
+        ('dwThreadId', _DWORD),
     ]
 
 
@@ -132,25 +132,25 @@
 
 
 class _COORD(ctypes.Structure):
-    _fields_ = [(r'X', ctypes.c_short), (r'Y', ctypes.c_short)]
+    _fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
 
 
 class _SMALL_RECT(ctypes.Structure):
     _fields_ = [
-        (r'Left', ctypes.c_short),
-        (r'Top', ctypes.c_short),
-        (r'Right', ctypes.c_short),
-        (r'Bottom', ctypes.c_short),
+        ('Left', ctypes.c_short),
+        ('Top', ctypes.c_short),
+        ('Right', ctypes.c_short),
+        ('Bottom', ctypes.c_short),
     ]
 
 
 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
     _fields_ = [
-        (r'dwSize', _COORD),
-        (r'dwCursorPosition', _COORD),
-        (r'wAttributes', _WORD),
-        (r'srWindow', _SMALL_RECT),
-        (r'dwMaximumWindowSize', _COORD),
+        ('dwSize', _COORD),
+        ('dwCursorPosition', _COORD),
+        ('wAttributes', _WORD),
+        ('srWindow', _SMALL_RECT),
+        ('dwMaximumWindowSize', _COORD),
     ]
 
 
@@ -167,39 +167,39 @@
 # These structs are only complete enough to achieve what we need.
 class CERT_CHAIN_CONTEXT(ctypes.Structure):
     _fields_ = (
-        (r"cbSize", _DWORD),
+        ("cbSize", _DWORD),
         # CERT_TRUST_STATUS struct
-        (r"dwErrorStatus", _DWORD),
-        (r"dwInfoStatus", _DWORD),
-        (r"cChain", _DWORD),
-        (r"rgpChain", ctypes.c_void_p),
-        (r"cLowerQualityChainContext", _DWORD),
-        (r"rgpLowerQualityChainContext", ctypes.c_void_p),
-        (r"fHasRevocationFreshnessTime", _BOOL),
-        (r"dwRevocationFreshnessTime", _DWORD),
+        ("dwErrorStatus", _DWORD),
+        ("dwInfoStatus", _DWORD),
+        ("cChain", _DWORD),
+        ("rgpChain", ctypes.c_void_p),
+        ("cLowerQualityChainContext", _DWORD),
+        ("rgpLowerQualityChainContext", ctypes.c_void_p),
+        ("fHasRevocationFreshnessTime", _BOOL),
+        ("dwRevocationFreshnessTime", _DWORD),
     )
 
 
 class CERT_USAGE_MATCH(ctypes.Structure):
     _fields_ = (
-        (r"dwType", _DWORD),
+        ("dwType", _DWORD),
         # CERT_ENHKEY_USAGE struct
-        (r"cUsageIdentifier", _DWORD),
-        (r"rgpszUsageIdentifier", ctypes.c_void_p),  # LPSTR *
+        ("cUsageIdentifier", _DWORD),
+        ("rgpszUsageIdentifier", ctypes.c_void_p),  # LPSTR *
     )
 
 
 class CERT_CHAIN_PARA(ctypes.Structure):
     _fields_ = (
-        (r"cbSize", _DWORD),
-        (r"RequestedUsage", CERT_USAGE_MATCH),
-        (r"RequestedIssuancePolicy", CERT_USAGE_MATCH),
-        (r"dwUrlRetrievalTimeout", _DWORD),
-        (r"fCheckRevocationFreshnessTime", _BOOL),
-        (r"dwRevocationFreshnessTime", _DWORD),
-        (r"pftCacheResync", ctypes.c_void_p),  # LPFILETIME
-        (r"pStrongSignPara", ctypes.c_void_p),  # PCCERT_STRONG_SIGN_PARA
-        (r"dwStrongSignFlags", _DWORD),
+        ("cbSize", _DWORD),
+        ("RequestedUsage", CERT_USAGE_MATCH),
+        ("RequestedIssuancePolicy", CERT_USAGE_MATCH),
+        ("dwUrlRetrievalTimeout", _DWORD),
+        ("fCheckRevocationFreshnessTime", _BOOL),
+        ("dwRevocationFreshnessTime", _DWORD),
+        ("pftCacheResync", ctypes.c_void_p),  # LPFILETIME
+        ("pStrongSignPara", ctypes.c_void_p),  # PCCERT_STRONG_SIGN_PARA
+        ("dwStrongSignFlags", _DWORD),
     )
 
 
@@ -359,7 +359,7 @@
         code -= 2 ** 32
     err = ctypes.WinError(code=code)
     raise OSError(
-        err.errno, r'%s: %s' % (encoding.strfromlocal(name), err.strerror)
+        err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
     )
 
 
@@ -732,7 +732,7 @@
             if e.errno != errno.EEXIST:
                 raise
     else:
-        raise IOError(errno.EEXIST, r"No usable temporary filename found")
+        raise IOError(errno.EEXIST, "No usable temporary filename found")
 
     try:
         os.unlink(temp)
--- a/mercurial/windows.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/windows.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,6 +8,7 @@
 from __future__ import absolute_import
 
 import errno
+import getpass
 import msvcrt
 import os
 import re
@@ -26,13 +27,14 @@
 )
 
 try:
-    import _winreg as winreg
+    import _winreg as winreg  # pytype: disable=import-error
 
     winreg.CloseKey
 except ImportError:
-    import winreg
+    # py3 only
+    import winreg  # pytype: disable=import-error
 
-osutil = policy.importmod(r'osutil')
+osutil = policy.importmod('osutil')
 
 getfsmountpoint = win32.getvolumename
 getfstype = win32.getfstype
@@ -70,8 +72,8 @@
     OPWRITE = 2
 
     def __init__(self, fp):
-        object.__setattr__(self, r'_fp', fp)
-        object.__setattr__(self, r'_lastop', 0)
+        object.__setattr__(self, '_fp', fp)
+        object.__setattr__(self, '_lastop', 0)
 
     def __enter__(self):
         self._fp.__enter__()
@@ -90,42 +92,42 @@
         self._fp.seek(0, os.SEEK_CUR)
 
     def seek(self, *args, **kwargs):
-        object.__setattr__(self, r'_lastop', self.OPNONE)
+        object.__setattr__(self, '_lastop', self.OPNONE)
         return self._fp.seek(*args, **kwargs)
 
     def write(self, d):
         if self._lastop == self.OPREAD:
             self._noopseek()
 
-        object.__setattr__(self, r'_lastop', self.OPWRITE)
+        object.__setattr__(self, '_lastop', self.OPWRITE)
         return self._fp.write(d)
 
     def writelines(self, *args, **kwargs):
         if self._lastop == self.OPREAD:
             self._noopseek()
 
-        object.__setattr__(self, r'_lastop', self.OPWRITE)
+        object.__setattr__(self, '_lastop', self.OPWRITE)
         return self._fp.writelines(*args, **kwargs)
 
     def read(self, *args, **kwargs):
         if self._lastop == self.OPWRITE:
             self._noopseek()
 
-        object.__setattr__(self, r'_lastop', self.OPREAD)
+        object.__setattr__(self, '_lastop', self.OPREAD)
         return self._fp.read(*args, **kwargs)
 
     def readline(self, *args, **kwargs):
         if self._lastop == self.OPWRITE:
             self._noopseek()
 
-        object.__setattr__(self, r'_lastop', self.OPREAD)
+        object.__setattr__(self, '_lastop', self.OPREAD)
         return self._fp.readline(*args, **kwargs)
 
     def readlines(self, *args, **kwargs):
         if self._lastop == self.OPWRITE:
             self._noopseek()
 
-        object.__setattr__(self, r'_lastop', self.OPREAD)
+        object.__setattr__(self, '_lastop', self.OPREAD)
         return self._fp.readlines(*args, **kwargs)
 
 
@@ -176,7 +178,7 @@
     except WindowsError as err:
         # convert to a friendlier exception
         raise IOError(
-            err.errno, r'%s: %s' % (encoding.strfromlocal(name), err.strerror)
+            err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror)
         )
 
 
@@ -215,7 +217,7 @@
             if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst):
                 raise
             self.close()
-            raise IOError(errno.EPIPE, r'Broken pipe')
+            raise IOError(errno.EPIPE, 'Broken pipe')
 
     def flush(self):
         try:
@@ -223,19 +225,11 @@
         except IOError as inst:
             if not win32.lasterrorwaspipeerror(inst):
                 raise
-            raise IOError(errno.EPIPE, r'Broken pipe')
-
-
-def _is_win_9x():
-    '''return true if run on windows 95, 98 or me.'''
-    try:
-        return sys.getwindowsversion()[3] == 1
-    except AttributeError:
-        return b'command' in encoding.environ.get(b'comspec', b'')
+            raise IOError(errno.EPIPE, 'Broken pipe')
 
 
 def openhardlinks():
-    return not _is_win_9x()
+    return True
 
 
 def parsepatchoutput(output_line):
@@ -282,7 +276,7 @@
     # fileno(), usually set to -1.
     fno = getattr(fd, 'fileno', None)
     if fno is not None and fno() >= 0:
-        msvcrt.setmode(fno(), os.O_BINARY)
+        msvcrt.setmode(fno(), os.O_BINARY)  # pytype: disable=module-attr
 
 
 def pconvert(path):
@@ -506,7 +500,7 @@
         pathexts = [b'']
 
     def findexisting(pathcommand):
-        b'Will append extension (if needed) and return existing file'
+        """Will append extension (if needed) and return existing file"""
         for ext in pathexts:
             executable = pathcommand + ext
             if os.path.exists(executable):
@@ -562,6 +556,8 @@
     """Return the name of the user with the given uid.
 
     If uid is None, return the name of the current user."""
+    if not uid:
+        return pycompat.fsencode(getpass.getuser())
     return None
 
 
@@ -686,4 +682,4 @@
 
 
 def bindunixsocket(sock, path):
-    raise NotImplementedError(r'unsupported platform')
+    raise NotImplementedError('unsupported platform')
--- a/mercurial/wireprotoframing.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/wireprotoframing.py	Tue Jan 21 13:14:51 2020 -0500
@@ -118,7 +118,7 @@
     FRAME_TYPE_STREAM_SETTINGS: FLAGS_STREAM_ENCODING_SETTINGS,
 }
 
-ARGUMENT_RECORD_HEADER = struct.Struct(r'<HH')
+ARGUMENT_RECORD_HEADER = struct.Struct('<HH')
 
 
 def humanflags(mapping, value):
@@ -191,9 +191,9 @@
     # 4 bits type
     # 4 bits flags
 
-    l = struct.pack(r'<I', len(payload))
+    l = struct.pack('<I', len(payload))
     frame[0:3] = l[0:3]
-    struct.pack_into(r'<HBB', frame, 3, requestid, streamid, streamflags)
+    struct.pack_into('<HBB', frame, 3, requestid, streamid, streamflags)
     frame[7] = (typeid << 4) | flags
     frame[8:] = payload
 
@@ -280,7 +280,7 @@
     # 4 bits frame flags
     # ... payload
     framelength = data[0] + 256 * data[1] + 65536 * data[2]
-    requestid, streamid, streamflags = struct.unpack_from(r'<HBB', data, 3)
+    requestid, streamid, streamflags = struct.unpack_from('<HBB', data, 3)
     typeflags = data[7]
 
     frametype = (typeflags & 0xF0) >> 4
@@ -460,11 +460,11 @@
     }
 
     for a in (
-        r'size',
-        r'fullhashes',
-        r'fullhashseed',
-        r'serverdercerts',
-        r'servercadercerts',
+        'size',
+        'fullhashes',
+        'fullhashseed',
+        'serverdercerts',
+        'servercadercerts',
     ):
         value = getattr(location, a)
         if value is not None:
@@ -548,15 +548,13 @@
                 raise ValueError(b'must use bytes for labels')
 
         # Formatting string must be ASCII.
-        formatting = formatting.decode(r'ascii', r'replace').encode(r'ascii')
+        formatting = formatting.decode('ascii', 'replace').encode('ascii')
 
         # Arguments must be UTF-8.
-        args = [a.decode(r'utf-8', r'replace').encode(r'utf-8') for a in args]
+        args = [a.decode('utf-8', 'replace').encode('utf-8') for a in args]
 
         # Labels must be ASCII.
-        labels = [
-            l.decode(r'ascii', r'strict').encode(r'ascii') for l in labels
-        ]
+        labels = [l.decode('ascii', 'strict').encode('ascii') for l in labels]
 
         atom = {b'msg': formatting}
         if args:
--- a/mercurial/wireprototypes.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/wireprototypes.py	Tue Jan 21 13:14:51 2020 -0500
@@ -404,7 +404,7 @@
             )
             % config,
             hint=_(b'usable compression engines: %s')
-            % b', '.sorted(validnames),
+            % b', '.sorted(validnames),  # pytype: disable=attribute-error
         )
 
     return compengines
--- a/mercurial/wireprotov1peer.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/wireprotov1peer.py	Tue Jan 21 13:14:51 2020 -0500
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import hashlib
 import sys
 import weakref
 
@@ -31,6 +30,7 @@
     repository,
     util as interfaceutil,
 )
+from .utils import hashutil
 
 urlreq = util.urlreq
 
@@ -489,7 +489,7 @@
 
         if heads != [b'force'] and self.capable(b'unbundlehash'):
             heads = wireprototypes.encodelist(
-                [b'hashed', hashlib.sha1(b''.join(sorted(heads))).digest()]
+                [b'hashed', hashutil.sha1(b''.join(sorted(heads))).digest()]
             )
         else:
             heads = wireprototypes.encodelist(heads)
@@ -599,9 +599,9 @@
         # don't pass optional arguments left at their default value
         opts = {}
         if three is not None:
-            opts[r'three'] = three
+            opts['three'] = three
         if four is not None:
-            opts[r'four'] = four
+            opts['four'] = four
         return self._call(b'debugwireargs', one=one, two=two, **opts)
 
     def _call(self, cmd, **args):
--- a/mercurial/wireprotov1server.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/wireprotov1server.py	Tue Jan 21 13:14:51 2020 -0500
@@ -679,7 +679,7 @@
             if not getattr(exc, 'duringunbundle2', False):
                 try:
                     raise
-                except error.Abort:
+                except error.Abort as exc:
                     # The old code we moved used procutil.stderr directly.
                     # We did not change it to minimise code change.
                     # This needs to be moved to something proper.
--- a/mercurial/wireprotov2peer.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/wireprotov2peer.py	Tue Jan 21 13:14:51 2020 -0500
@@ -472,7 +472,7 @@
             )
 
         headers = {
-            r'Accept': redirect.mediatype,
+            'Accept': redirect.mediatype,
         }
 
         req = self._requestbuilder(pycompat.strurl(redirect.url), None, headers)
--- a/mercurial/wireprotov2server.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/wireprotov2server.py	Tue Jan 21 13:14:51 2020 -0500
@@ -8,7 +8,6 @@
 
 import collections
 import contextlib
-import hashlib
 
 from .i18n import _
 from .node import (
@@ -31,6 +30,7 @@
 from .interfaces import util as interfaceutil
 from .utils import (
     cborutil,
+    hashutil,
     stringutil,
 )
 
@@ -858,7 +858,7 @@
 
         cacher.adjustcachekeystate(state)
 
-        hasher = hashlib.sha1()
+        hasher = hashutil.sha1()
         for chunk in cborutil.streamencode(state):
             hasher.update(chunk)
 
--- a/mercurial/worker.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/mercurial/worker.py	Tue Jan 21 13:14:51 2020 -0500
@@ -36,7 +36,7 @@
 
     # posix
     try:
-        n = int(os.sysconf(r'SC_NPROCESSORS_ONLN'))
+        n = int(os.sysconf('SC_NPROCESSORS_ONLN'))
         if n > 0:
             return n
     except (AttributeError, ValueError):
@@ -226,7 +226,7 @@
     selector = selectors.DefaultSelector()
     for rfd, wfd in pipes:
         os.close(wfd)
-        selector.register(os.fdopen(rfd, r'rb', 0), selectors.EVENT_READ)
+        selector.register(os.fdopen(rfd, 'rb', 0), selectors.EVENT_READ)
 
     def cleanup():
         signal.signal(signal.SIGINT, oldhandler)
--- a/relnotes/next	Thu Jan 09 14:19:20 2020 -0500
+++ b/relnotes/next	Tue Jan 21 13:14:51 2020 -0500
@@ -1,14 +1,35 @@
 == New Features ==
 
+ * Windows will process hgrc files in %PROGRAMDATA%\Mercurial\hgrc.d.
+
 
 == New Experimental Features ==
 
 
 == Bug Fixes  ==
 
+ * The `indent()` template function was documented to not indent empty lines,
+   but it still indented the first line even if it was empty. It no longer does
+   that.
 
 == Backwards Compatibility Changes ==
 
 
 == Internal API Changes ==
 
+ * Matcher instances no longer have an `explicitdir` property. Consider
+   rewriting your code to use `repo.wvfs.isdir()` and/or
+   `ctx.hasdir()` instead. Also, `traversedir` is now called in cases
+   where only `explicitdir` used to be called, so if you were already
+   using `traversedir`, you may be able to simply drop your use of
+   `explicitdir`.
+
+ * The `revlog.nodemap` object has been merged into the `revlog.index`
+   object (see the sketch after this list).
+   * `n in revlog.nodemap` becomes `revlog.index.has_node(n)`,
+   * `revlog.nodemap[n]` becomes `revlog.index.rev(n)`,
+   * `revlog.nodemap.get(n)` becomes `revlog.index.get_rev(n)`.
+
+ * `copies.duplicatecopies()` was renamed to
+   `copies.graftcopies()`. Its arguments changed from revision numbers
+   to context objects. It also lost its `repo` and `skip` arguments
+   (they should no longer be needed).
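
To illustrate the ``revlog.index`` migration noted above, a minimal
sketch (``rl`` stands for any revlog instance; the helper names are
illustrative)::

   def lookup_rev(rl, node):
       # was: rl.nodemap.get(node); returns None when the node is absent
       return rl.index.get_rev(node)

   def has_node(rl, node):
       # was: node in rl.nodemap
       return rl.index.has_node(node)
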
--- a/rust/Cargo.lock	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/Cargo.lock	Tue Jan 21 13:14:51 2020 -0500
@@ -32,6 +32,15 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "c2-chacha"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "cfg-if"
 version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -105,6 +114,16 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "getrandom"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "hg-core"
 version = "0.1.0"
 dependencies = [
@@ -115,6 +134,7 @@
  "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -179,6 +199,11 @@
 ]
 
 [[package]]
+name = "ppv-lite86"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "python27-sys"
 version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -215,6 +240,18 @@
 ]
 
 [[package]]
+name = "rand"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "rand_chacha"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -224,6 +261,15 @@
 ]
 
 [[package]]
+name = "rand_chacha"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "rand_core"
 version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -237,6 +283,14 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "rand_core"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "rand_hc"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -245,6 +299,14 @@
 ]
 
 [[package]]
+name = "rand_hc"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "rand_isaac"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -373,6 +435,19 @@
 ]
 
 [[package]]
+name = "twox-hash"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "wasi"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "winapi"
 version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -397,6 +472,7 @@
 "checksum autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875"
 "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 "checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5"
+"checksum c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d64d04786e0f528460fc884753cf8dddcc466be308f6026f8e355c41a0e4101"
 "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
 "checksum cpython 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "85532c648315aeb0829ad216a6a29aa3212cf9319bc7f6daf1404aa0bdd1485f"
@@ -406,6 +482,7 @@
 "checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
 "checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
 "checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
+"checksum getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "473a1265acc8ff1e808cd0a1af8cee3c2ee5200916058a2ca113c29f2d903571"
 "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 "checksum libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)" = "74dfca3d9957906e8d1e6a0b641dc9a59848e793f1da2165889fd4f62d10d79c"
 "checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e"
@@ -413,13 +490,18 @@
 "checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
 "checksum num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32"
 "checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273"
+"checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b"
 "checksum python27-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "372555e88a6bc8109eb641380240dc8d25a128fc48363ec9075664daadffdd5b"
 "checksum python3-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f3a8ebed3f1201fda179f3960609dbbc10cd8c75e9f2afcb03788278f367d8ea"
 "checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
+"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412"
 "checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
+"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853"
 "checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
 "checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
+"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
 "checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
+"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
 "checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
 "checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
 "checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
@@ -435,6 +517,8 @@
 "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
 "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
 "checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
+"checksum twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56"
+"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d"
 "checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
 "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- a/rust/README.rst	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/README.rst	Tue Jan 21 13:14:51 2020 -0500
@@ -3,76 +3,50 @@
 ===================
 
 This directory contains various Rust code for the Mercurial project.
+Rust is not required to use (or build) Mercurial, but using it
+improves performance in some areas.
 
-The top-level ``Cargo.toml`` file defines a workspace containing
-all primary Mercurial crates.
+There are currently three independent rust projects:
+- chg. An implementation of chg, in rust instead of C.
+- hgcli. An experiment for starting hg in rust rather than in python,
+  by linking with the python runtime. Probably meant to be replaced by
+  PyOxidizer at some point.
+- hg-core (and hg-cpython/hg-directffi): implementation of some
+  functionality of mercurial in rust, e.g. ancestry computations in
+  revision graphs or pull discovery. The top-level ``Cargo.toml`` file
+  defines a workspace containing these crates.
+
+Using hg-core
+=============
 
-Building
-========
+Local use (you need to clean previous build artifacts if you have
+built without rust previously)::
 
-To build the Rust components::
+  $ HGWITHRUSTEXT=cpython make local # to use ./hg
+  $ HGWITHRUSTEXT=cpython make tests # to run all tests
+  $ (cd tests; HGWITHRUSTEXT=cpython ./run-tests.py) # only the .t tests
+  $ ./hg debuginstall | grep rust # to validate rust is in use
+  checking module policy (rust+c-allow)
 
-   $ cargo build
+Setting ``HGWITHRUSTEXT`` to other values like ``true`` is deprecated
+and enables only a fraction of the rust code.
 
-If you prefer a non-debug / release configuration::
+Developing hg-core
+==================
+
+Simply run::
 
    $ cargo build --release
 
-Features
---------
-
-The following Cargo features are available:
-
-localdev (default)
-   Produce files that work with an in-source-tree build.
-
-   In this mode, the build finds and uses a ``python2.7`` binary from
-   ``PATH``. The ``hg`` binary assumes it runs from ``rust/target/<target>hg``
-   and it finds Mercurial files at ``dirname($0)/../../../``.
+It is possible to build without ``--release``, but it is not
+recommended if performance matters: builds without ``--release`` can
+be an order of magnitude slower.
 
-Build Mechanism
----------------
-
-The produced ``hg`` binary is *bound* to a CPython installation. The
-binary links against and loads a CPython library that is discovered
-at build time (by a ``build.rs`` Cargo build script). The Python
-standard library defined by this CPython installation is also used.
-
-Finding the appropriate CPython installation to use is done by
-the ``python27-sys`` crate's ``build.rs``. Its search order is::
-
-1. ``PYTHON_SYS_EXECUTABLE`` environment variable.
-2. ``python`` executable on ``PATH``
-3. ``python2`` executable on ``PATH``
-4. ``python2.7`` executable on ``PATH``
+For faster builds, you may want to skip code generation::
 
-Additional verification of the found Python will be performed by our
-``build.rs`` to ensure it meets Mercurial's requirements.
-
-Details about the build-time configured Python are built into the
-produced ``hg`` binary. This means that a built ``hg`` binary is only
-suitable for a specific, well-defined role. These roles are controlled
-by Cargo features (see above).
-
-Running
-=======
-
-The ``hgcli`` crate produces an ``hg`` binary. You can run this binary
-via ``cargo run``::
+  $ cargo check
 
-   $ cargo run --manifest-path hgcli/Cargo.toml
-
-Or directly::
-
-   $ target/debug/hg
-   $ target/release/hg
+You can run only the rust-specific tests (as opposed to tests of
+mercurial as a whole) with::
 
-You can also run the test harness with this binary::
-
-   $ ./run-tests.py --with-hg ../rust/target/debug/hg
-
-.. note::
-
-   Integration with the test harness is still preliminary. Remember to
-   ``cargo build`` after changes because the test harness doesn't yet
-   automatically build Rust code.
+  $ cargo test --all
--- a/rust/chg/src/attachio.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/chg/src/attachio.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -8,9 +8,9 @@
 use futures::{Async, Future, Poll};
 use std::io;
 use std::os::unix::io::AsRawFd;
-use tokio_hglib::{Client, Connection};
 use tokio_hglib::codec::ChannelMessage;
 use tokio_hglib::protocol::MessageLoop;
+use tokio_hglib::{Client, Connection};
 
 use super::message;
 use super::procutil;
@@ -28,7 +28,8 @@
 /// dispose of the client-side handle once attached.
 #[must_use = "futures do nothing unless polled"]
 pub struct AttachIo<C, I, O, E>
-    where C: Connection,
+where
+    C: Connection,
 {
     msg_loop: MessageLoop<C>,
     stdin: I,
@@ -37,23 +38,34 @@
 }
 
 impl<C, I, O, E> AttachIo<C, I, O, E>
-    where C: Connection + AsRawFd,
-          I: AsRawFd,
-          O: AsRawFd,
-          E: AsRawFd,
+where
+    C: Connection + AsRawFd,
+    I: AsRawFd,
+    O: AsRawFd,
+    E: AsRawFd,
 {
-    pub fn with_client(client: Client<C>, stdin: I, stdout: O, stderr: Option<E>)
-                       -> AttachIo<C, I, O, E> {
+    pub fn with_client(
+        client: Client<C>,
+        stdin: I,
+        stdout: O,
+        stderr: Option<E>,
+    ) -> AttachIo<C, I, O, E> {
         let msg_loop = MessageLoop::start(client, b"attachio");
-        AttachIo { msg_loop, stdin, stdout, stderr }
+        AttachIo {
+            msg_loop,
+            stdin,
+            stdout,
+            stderr,
+        }
     }
 }
 
 impl<C, I, O, E> Future for AttachIo<C, I, O, E>
-    where C: Connection + AsRawFd,
-          I: AsRawFd,
-          O: AsRawFd,
-          E: AsRawFd,
+where
+    C: Connection + AsRawFd,
+    I: AsRawFd,
+    O: AsRawFd,
+    E: AsRawFd,
 {
     type Item = Client<C>;
     type Error = io::Error;
@@ -67,8 +79,10 @@
                     if fd_cnt == 3 {
                         return Ok(Async::Ready(client));
                     } else {
-                        return Err(io::Error::new(io::ErrorKind::InvalidData,
-                                                  "unexpected attachio result"));
+                        return Err(io::Error::new(
+                            io::ErrorKind::InvalidData,
+                            "unexpected attachio result",
+                        ));
                     }
                 }
                 ChannelMessage::Data(..) => {
@@ -86,10 +100,13 @@
                     procutil::send_raw_fds(sock_fd, &[ifd, ofd, efd])?;
                     self.msg_loop = MessageLoop::resume(client);
                 }
-                ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) |
-                ChannelMessage::SystemRequest(..) => {
-                    return Err(io::Error::new(io::ErrorKind::InvalidData,
-                                              "unsupported request while attaching io"));
+                ChannelMessage::InputRequest(..)
+                | ChannelMessage::LineRequest(..)
+                | ChannelMessage::SystemRequest(..) => {
+                    return Err(io::Error::new(
+                        io::ErrorKind::InvalidData,
+                        "unsupported request while attaching io",
+                    ));
                 }
             }
         }
--- a/rust/chg/src/clientext.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/chg/src/clientext.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -9,8 +9,8 @@
 use std::os::unix::ffi::OsStrExt;
 use std::os::unix::io::AsRawFd;
 use std::path::Path;
+use tokio_hglib::protocol::OneShotRequest;
 use tokio_hglib::{Client, Connection};
-use tokio_hglib::protocol::OneShotRequest;
 
 use super::attachio::AttachIo;
 use super::message;
@@ -18,46 +18,54 @@
 use super::uihandler::SystemHandler;
 
 pub trait ChgClientExt<C>
-    where C: Connection + AsRawFd,
+where
+    C: Connection + AsRawFd,
 {
     /// Attaches the client file descriptors to the server.
     fn attach_io<I, O, E>(self, stdin: I, stdout: O, stderr: E) -> AttachIo<C, I, O, E>
-        where I: AsRawFd,
-              O: AsRawFd,
-              E: AsRawFd;
+    where
+        I: AsRawFd,
+        O: AsRawFd,
+        E: AsRawFd;
 
     /// Changes the working directory of the server.
     fn set_current_dir<P>(self, dir: P) -> OneShotRequest<C>
-        where P: AsRef<Path>;
+    where
+        P: AsRef<Path>;
 
     /// Runs the specified Mercurial command with cHg extension.
     fn run_command_chg<I, P, H>(self, handler: H, args: I) -> ChgRunCommand<C, H>
-        where I: IntoIterator<Item = P>,
-              P: AsRef<OsStr>,
-              H: SystemHandler;
+    where
+        I: IntoIterator<Item = P>,
+        P: AsRef<OsStr>,
+        H: SystemHandler;
 }
 
 impl<C> ChgClientExt<C> for Client<C>
-    where C: Connection + AsRawFd,
+where
+    C: Connection + AsRawFd,
 {
     fn attach_io<I, O, E>(self, stdin: I, stdout: O, stderr: E) -> AttachIo<C, I, O, E>
-        where I: AsRawFd,
-              O: AsRawFd,
-              E: AsRawFd,
+    where
+        I: AsRawFd,
+        O: AsRawFd,
+        E: AsRawFd,
     {
         AttachIo::with_client(self, stdin, stdout, Some(stderr))
     }
 
     fn set_current_dir<P>(self, dir: P) -> OneShotRequest<C>
-        where P: AsRef<Path>,
+    where
+        P: AsRef<Path>,
     {
         OneShotRequest::start_with_args(self, b"chdir", dir.as_ref().as_os_str().as_bytes())
     }
 
     fn run_command_chg<I, P, H>(self, handler: H, args: I) -> ChgRunCommand<C, H>
-        where I: IntoIterator<Item = P>,
-              P: AsRef<OsStr>,
-              H: SystemHandler,
+    where
+        I: IntoIterator<Item = P>,
+        P: AsRef<OsStr>,
+        H: SystemHandler,
     {
         ChgRunCommand::with_client(self, handler, message::pack_args_os(args))
     }
--- a/rust/chg/src/locator.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/chg/src/locator.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -91,11 +91,16 @@
 /// Determines the default hg command.
 pub fn default_hg_command() -> OsString {
     // TODO: maybe allow embedding the path at compile time (or load from hgrc)
-    env::var_os("CHGHG").or(env::var_os("HG")).unwrap_or(OsStr::new("hg").to_owned())
+    env::var_os("CHGHG")
+        .or(env::var_os("HG"))
+        .unwrap_or(OsStr::new("hg").to_owned())
 }
 
 fn default_timeout() -> Duration {
-    let secs = env::var("CHGTIMEOUT").ok().and_then(|s| s.parse().ok()).unwrap_or(60);
+    let secs = env::var("CHGTIMEOUT")
+        .ok()
+        .and_then(|s| s.parse().ok())
+        .unwrap_or(60);
     Duration::from_secs(secs)
 }
 
@@ -103,19 +108,24 @@
 ///
 /// If the directory already exists, tests its permission.
 fn create_secure_dir<P>(path: P) -> io::Result<()>
-    where P: AsRef<Path>,
+where
+    P: AsRef<Path>,
 {
-    DirBuilder::new().mode(0o700).create(path.as_ref()).or_else(|err| {
-        if err.kind() == io::ErrorKind::AlreadyExists {
-            check_secure_dir(path).map(|_| ())
-        } else {
-            Err(err)
-        }
-    })
+    DirBuilder::new()
+        .mode(0o700)
+        .create(path.as_ref())
+        .or_else(|err| {
+            if err.kind() == io::ErrorKind::AlreadyExists {
+                check_secure_dir(path).map(|_| ())
+            } else {
+                Err(err)
+            }
+        })
 }
 
 fn check_secure_dir<P>(path: P) -> io::Result<P>
-    where P: AsRef<Path>,
+where
+    P: AsRef<Path>,
 {
     let a = fs::symlink_metadata(path.as_ref())?;
     if a.is_dir() && a.uid() == procutil::get_effective_uid() && (a.mode() & 0o777) == 0o700 {
--- a/rust/chg/src/main.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/chg/src/main.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -9,9 +9,9 @@
 extern crate tokio;
 extern crate tokio_hglib;
 
-use chg::{ChgClientExt, ChgUiHandler};
 use chg::locator;
 use chg::procutil;
+use chg::{ChgClientExt, ChgUiHandler};
 use futures::sync::oneshot;
 use std::env;
 use std::io;
@@ -42,13 +42,19 @@
             // just make the output looks similar to chg of C
             let l = format!("{}", record.level()).to_lowercase();
             let t = self.start.elapsed();
-            writeln!(io::stderr(), "chg: {}: {}.{:06} {}",
-                     l, t.as_secs(), t.subsec_micros(), record.args()).unwrap_or(());
+            writeln!(
+                io::stderr(),
+                "chg: {}: {}.{:06} {}",
+                l,
+                t.as_secs(),
+                t.subsec_micros(),
+                record.args()
+            )
+            .unwrap_or(());
         }
     }
 
-    fn flush(&self) {
-    }
+    fn flush(&self) {}
 }
 
 fn main() {
@@ -71,28 +77,24 @@
     let handler = ChgUiHandler::new();
     let (result_tx, result_rx) = oneshot::channel();
     let fut = UnixClient::connect(sock_path)
-        .and_then(|client| {
-            client.set_current_dir(current_dir)
-        })
-        .and_then(|client| {
-            client.attach_io(io::stdin(), io::stdout(), io::stderr())
-        })
+        .and_then(|client| client.set_current_dir(current_dir))
+        .and_then(|client| client.attach_io(io::stdin(), io::stdout(), io::stderr()))
         .and_then(|client| {
             let pid = client.server_spec().process_id.unwrap();
             let pgid = client.server_spec().process_group_id;
             procutil::setup_signal_handler_once(pid, pgid)?;
             Ok(client)
         })
-        .and_then(|client| {
-            client.run_command_chg(handler, env::args_os().skip(1))
-        })
+        .and_then(|client| client.run_command_chg(handler, env::args_os().skip(1)))
         .map(|(_client, _handler, code)| {
             procutil::restore_signal_handler_once()?;
             Ok(code)
         })
-        .or_else(|err| Ok(Err(err)))  // pass back error to caller
+        .or_else(|err| Ok(Err(err))) // pass back error to caller
         .map(|res| result_tx.send(res).unwrap());
     tokio::run(fut);
-    result_rx.wait().unwrap_or(Err(io::Error::new(io::ErrorKind::Other,
-                                                  "no exit code set")))
+    result_rx.wait().unwrap_or(Err(io::Error::new(
+        io::ErrorKind::Other,
+        "no exit code set",
+    )))
 }
--- a/rust/chg/src/message.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/chg/src/message.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -11,7 +11,7 @@
 use std::io;
 use std::os::unix::ffi::OsStrExt;
 
-pub use tokio_hglib::message::*;  // re-exports
+pub use tokio_hglib::message::*; // re-exports
 
 /// Shell command type requested by the server.
 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
@@ -42,7 +42,10 @@
         let mut s = l.splitn(2, |&c| c == b'=');
         let k = s.next().unwrap();
         let v = s.next().ok_or(new_parse_error("malformed env"))?;
-        envs.push((OsStr::from_bytes(k).to_owned(), OsStr::from_bytes(v).to_owned()));
+        envs.push((
+            OsStr::from_bytes(k).to_owned(),
+            OsStr::from_bytes(v).to_owned(),
+        ));
     }
 
     let spec = CommandSpec {
@@ -57,41 +60,54 @@
     match value {
         b"pager" => Ok(CommandType::Pager),
         b"system" => Ok(CommandType::System),
-        _ => Err(new_parse_error(format!("unknown command type: {}", decode_latin1(value)))),
+        _ => Err(new_parse_error(format!(
+            "unknown command type: {}",
+            decode_latin1(value)
+        ))),
     }
 }
 
 fn decode_latin1<S>(s: S) -> String
-    where S: AsRef<[u8]>,
+where
+    S: AsRef<[u8]>,
 {
     s.as_ref().iter().map(|&c| c as char).collect()
 }
 
 fn new_parse_error<E>(error: E) -> io::Error
-    where E: Into<Box<error::Error + Send + Sync>>,
+where
+    E: Into<Box<error::Error + Send + Sync>>,
 {
     io::Error::new(io::ErrorKind::InvalidData, error)
 }
 
 #[cfg(test)]
 mod tests {
+    use super::*;
     use std::os::unix::ffi::OsStringExt;
-    use super::*;
 
     #[test]
     fn parse_command_spec_good() {
-        let src = [b"pager".as_ref(),
-                   b"less -FRX".as_ref(),
-                   b"/tmp".as_ref(),
-                   b"LANG=C".as_ref(),
-                   b"HGPLAIN=".as_ref()].join(&0);
+        let src = [
+            b"pager".as_ref(),
+            b"less -FRX".as_ref(),
+            b"/tmp".as_ref(),
+            b"LANG=C".as_ref(),
+            b"HGPLAIN=".as_ref(),
+        ]
+        .join(&0);
         let spec = CommandSpec {
             command: os_string_from(b"less -FRX"),
             current_dir: os_string_from(b"/tmp"),
-            envs: vec![(os_string_from(b"LANG"), os_string_from(b"C")),
-                       (os_string_from(b"HGPLAIN"), os_string_from(b""))],
+            envs: vec![
+                (os_string_from(b"LANG"), os_string_from(b"C")),
+                (os_string_from(b"HGPLAIN"), os_string_from(b"")),
+            ],
         };
-        assert_eq!(parse_command_spec(Bytes::from(src)).unwrap(), (CommandType::Pager, spec));
+        assert_eq!(
+            parse_command_spec(Bytes::from(src)).unwrap(),
+            (CommandType::Pager, spec)
+        );
     }
 
     #[test]
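
The parse_command_spec_good test above also documents the wire format of a
server command spec: NUL-separated fields, first the command type, then the
shell command, the working directory, and any number of KEY=VALUE environment
entries. A minimal decoding sketch (assuming the chg crate's message module
and the CommandSpec fields are public, as this patch's module layout
suggests):

use bytes::Bytes;
use chg::message::{parse_command_spec, CommandType};

fn main() -> std::io::Result<()> {
    // Field order: command type, shell command, working dir, env pairs.
    let src = [
        b"system".as_ref(),
        b"ls -l".as_ref(),
        b"/tmp".as_ref(),
        b"LANG=C".as_ref(),
    ]
    .join(&0);
    let (cmd_type, spec) = parse_command_spec(Bytes::from(src))?;
    assert_eq!(cmd_type, CommandType::System);
    assert_eq!(spec.command.to_str(), Some("ls -l"));
    Ok(())
}
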
--- a/rust/chg/src/procutil.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/chg/src/procutil.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -33,7 +33,7 @@
     }
     let r = unsafe { libc::fcntl(fd, libc::F_SETFL, flags & !libc::O_NONBLOCK) };
     if r < 0 {
-        return Err(io::Error::last_os_error())
+        return Err(io::Error::last_os_error());
     }
     Ok(())
 }
--- a/rust/chg/src/runcommand.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/chg/src/runcommand.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -11,9 +11,9 @@
 use std::io;
 use std::mem;
 use std::os::unix::io::AsRawFd;
-use tokio_hglib::{Client, Connection};
 use tokio_hglib::codec::ChannelMessage;
 use tokio_hglib::protocol::MessageLoop;
+use tokio_hglib::{Client, Connection};
 
 use super::attachio::AttachIo;
 use super::message::{self, CommandType};
@@ -26,8 +26,9 @@
 }
 
 enum CommandState<C, H>
-    where C: Connection,
-          H: SystemHandler,
+where
+    C: Connection,
+    H: SystemHandler,
 {
     Running(MessageLoop<C>, H),
     SpawningPager(Client<C>, <H::SpawnPagerResult as IntoFuture>::Future),
@@ -41,18 +42,19 @@
 /// Future resolves to `(exit_code, client)`.
 #[must_use = "futures do nothing unless polled"]
 pub struct ChgRunCommand<C, H>
-    where C: Connection,
-          H: SystemHandler,
+where
+    C: Connection,
+    H: SystemHandler,
 {
     state: CommandState<C, H>,
 }
 
 impl<C, H> ChgRunCommand<C, H>
-    where C: Connection + AsRawFd,
-          H: SystemHandler,
+where
+    C: Connection + AsRawFd,
+    H: SystemHandler,
 {
-    pub fn with_client(client: Client<C>, handler: H, packed_args: Bytes)
-                       -> ChgRunCommand<C, H> {
+    pub fn with_client(client: Client<C>, handler: H, packed_args: Bytes) -> ChgRunCommand<C, H> {
         let msg_loop = MessageLoop::start_with_args(client, b"runcommand", packed_args);
         ChgRunCommand {
             state: CommandState::Running(msg_loop, handler),
@@ -61,8 +63,9 @@
 }
 
 impl<C, H> Future for ChgRunCommand<C, H>
-    where C: Connection + AsRawFd,
-          H: SystemHandler,
+where
+    C: Connection + AsRawFd,
+    H: SystemHandler,
 {
     type Item = (Client<C>, H, i32);
     type Error = io::Error;
@@ -87,8 +90,9 @@
 }
 
 impl<C, H> CommandState<C, H>
-    where C: Connection + AsRawFd,
-          H: SystemHandler,
+where
+    C: Connection + AsRawFd,
+    H: SystemHandler,
 {
     fn poll(self) -> CommandPoll<C, H> {
         match self {
@@ -102,14 +106,16 @@
             CommandState::SpawningPager(client, mut fut) => {
                 if let Async::Ready((handler, pin)) = fut.poll()? {
                     let fut = AttachIo::with_client(client, io::stdin(), pin, None);
-                    Ok(AsyncS::PollAgain(CommandState::AttachingPager(fut, handler)))
+                    Ok(AsyncS::PollAgain(CommandState::AttachingPager(
+                        fut, handler,
+                    )))
                 } else {
                     Ok(AsyncS::NotReady(CommandState::SpawningPager(client, fut)))
                 }
             }
             CommandState::AttachingPager(mut fut, handler) => {
                 if let Async::Ready(client) = fut.poll()? {
-                    let msg_loop = MessageLoop::start(client, b"");  // terminator
+                    let msg_loop = MessageLoop::start(client, b""); // terminator
                     Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
                 } else {
                     Ok(AsyncS::NotReady(CommandState::AttachingPager(fut, handler)))
@@ -124,14 +130,15 @@
                     Ok(AsyncS::NotReady(CommandState::WaitingSystem(client, fut)))
                 }
             }
-            CommandState::Finished => panic!("poll ChgRunCommand after it's done")
+            CommandState::Finished => panic!("poll ChgRunCommand after it's done"),
         }
     }
 }
 
 fn process_message<C, H>(client: Client<C>, handler: H, msg: ChannelMessage) -> CommandPoll<C, H>
-    where C: Connection,
-          H: SystemHandler,
+where
+    C: Connection,
+    H: SystemHandler,
 {
     match msg {
         ChannelMessage::Data(b'r', data) => {
@@ -143,9 +150,10 @@
             let msg_loop = MessageLoop::resume(client);
             Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
         }
-        ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) => {
-            Err(io::Error::new(io::ErrorKind::InvalidData, "unsupported request"))
-        }
+        ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) => Err(io::Error::new(
+            io::ErrorKind::InvalidData,
+            "unsupported request",
+        )),
         ChannelMessage::SystemRequest(data) => {
             let (cmd_type, cmd_spec) = message::parse_command_spec(data)?;
             match cmd_type {
--- a/rust/chg/src/uihandler.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/chg/src/uihandler.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -3,8 +3,8 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
+use futures::future::IntoFuture;
 use futures::Future;
-use futures::future::IntoFuture;
 use std::io;
 use std::os::unix::io::AsRawFd;
 use std::os::unix::process::ExitStatusExt;
@@ -33,8 +33,7 @@
 }
 
 /// Default cHg implementation to process requests received from server.
-pub struct ChgUiHandler {
-}
+pub struct ChgUiHandler {}
 
 impl ChgUiHandler {
     pub fn new() -> ChgUiHandler {
@@ -57,7 +56,7 @@
         // otherwise the server won't get SIGPIPE if it does not write
         // anything. (issue5278)
         // kill(peerpid, SIGPIPE);
-        tokio::spawn(pager.map(|_| ()).map_err(|_| ()));  // just ignore errors
+        tokio::spawn(pager.map(|_| ()).map_err(|_| ())); // just ignore errors
         Ok((self, pin))
     }
 
@@ -67,7 +66,9 @@
             .into_future()
             .flatten()
             .map(|status| {
-                let code = status.code().or_else(|| status.signal().map(|n| -n))
+                let code = status
+                    .code()
+                    .or_else(|| status.signal().map(|n| -n))
                     .expect("either exit code or signal should be set");
                 (self, code)
             });
@@ -84,4 +85,4 @@
         .env_clear()
         .envs(spec.envs.iter().cloned());
     builder
- }
+}
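
The reformatted chain in run_system encodes the usual convention for turning
a child's exit status into a single integer: prefer the exit code, otherwise
report the negated signal number. A standalone, Unix-only sketch of the same
rule using only std:

use std::os::unix::process::ExitStatusExt;
use std::process::Command;

fn main() -> std::io::Result<()> {
    let status = Command::new("true").status()?;
    // Prefer the exit code; fall back to -signal when the child was
    // killed by a signal.
    let code = status
        .code()
        .or_else(|| status.signal().map(|n| -n))
        .expect("either exit code or signal should be set");
    assert_eq!(code, 0);
    Ok(())
}
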
--- a/rust/hg-core/Cargo.toml	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/Cargo.toml	Tue Jan 21 13:14:51 2020 -0500
@@ -14,5 +14,6 @@
 memchr = "2.2.0"
 rand = "0.6.5"
 rand_pcg = "0.1.1"
+rayon = "1.2.0"
 regex = "1.1.0"
-rayon = "1.2.0"
+twox-hash = "1.5.0"
--- a/rust/hg-core/src/dirstate.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/dirstate.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -5,9 +5,8 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use crate::{utils::hg_path::HgPathBuf, DirstateParseError};
+use crate::{utils::hg_path::HgPathBuf, DirstateParseError, FastHashMap};
 use std::collections::hash_map;
-use std::collections::HashMap;
 use std::convert::TryFrom;
 
 pub mod dirs_multiset;
@@ -32,9 +31,14 @@
     pub size: i32,
 }
 
-pub type StateMap = HashMap<HgPathBuf, DirstateEntry>;
+/// A `DirstateEntry` with a size of `-2` means that it was merged from the
+/// other parent. This allows `hg revert` to restore the correct status
+/// during a merge.
+pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
+
+pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
 pub type StateMapIter<'a> = hash_map::Iter<'a, HgPathBuf, DirstateEntry>;
-pub type CopyMap = HashMap<HgPathBuf, HgPathBuf>;
+pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>;
 pub type CopyMapIter<'a> = hash_map::Iter<'a, HgPathBuf, HgPathBuf>;
 
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
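
To make the new constant concrete: a sketch of the check that callers such as
the status code later in this series perform, assuming the hg crate exposes
the dirstate module as in this patch:

use hg::dirstate::SIZE_FROM_OTHER_PARENT;
use hg::{DirstateEntry, EntryState};

/// True when a normal-state entry was merged in from the other parent;
/// status must then report the file as modified rather than clean.
fn is_from_other_parent(entry: &DirstateEntry) -> bool {
    entry.state == EntryState::Normal && entry.size == SIZE_FROM_OTHER_PARENT
}
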
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -11,16 +11,16 @@
 use crate::utils::hg_path::{HgPath, HgPathBuf};
 use crate::{
     dirstate::EntryState, utils::files, DirstateEntry, DirstateMapError,
+    FastHashMap,
 };
 use std::collections::hash_map::{self, Entry};
-use std::collections::HashMap;
 
 // could be encapsulated if we care API stability more seriously
 pub type DirsMultisetIter<'a> = hash_map::Keys<'a, HgPathBuf, u32>;
 
 #[derive(PartialEq, Debug)]
 pub struct DirsMultiset {
-    inner: HashMap<HgPathBuf, u32>,
+    inner: FastHashMap<HgPathBuf, u32>,
 }
 
 impl DirsMultiset {
@@ -28,51 +28,62 @@
     ///
     /// If `skip_state` is provided, skips dirstate entries with equal state.
     pub fn from_dirstate(
-        vec: &HashMap<HgPathBuf, DirstateEntry>,
+        dirstate: &FastHashMap<HgPathBuf, DirstateEntry>,
         skip_state: Option<EntryState>,
-    ) -> Self {
+    ) -> Result<Self, DirstateMapError> {
         let mut multiset = DirsMultiset {
-            inner: HashMap::new(),
+            inner: FastHashMap::default(),
         };
 
-        for (filename, DirstateEntry { state, .. }) in vec {
+        for (filename, DirstateEntry { state, .. }) in dirstate {
             // This `if` is optimized out of the loop
             if let Some(skip) = skip_state {
                 if skip != *state {
-                    multiset.add_path(filename);
+                    multiset.add_path(filename)?;
                 }
             } else {
-                multiset.add_path(filename);
+                multiset.add_path(filename)?;
             }
         }
 
-        multiset
+        Ok(multiset)
     }
 
     /// Initializes the multiset from a manifest.
-    pub fn from_manifest(vec: &Vec<HgPathBuf>) -> Self {
+    pub fn from_manifest(
+        manifest: &[impl AsRef<HgPath>],
+    ) -> Result<Self, DirstateMapError> {
         let mut multiset = DirsMultiset {
-            inner: HashMap::new(),
+            inner: FastHashMap::default(),
         };
 
-        for filename in vec {
-            multiset.add_path(filename);
+        for filename in manifest {
+            multiset.add_path(filename.as_ref())?;
         }
 
-        multiset
+        Ok(multiset)
     }
 
     /// Increases the count of deepest directory contained in the path.
     ///
     /// If the directory is not yet in the map, adds its parents.
-    pub fn add_path(&mut self, path: &HgPath) {
-        for subpath in files::find_dirs(path) {
+    pub fn add_path(
+        &mut self,
+        path: impl AsRef<HgPath>,
+    ) -> Result<(), DirstateMapError> {
+        for subpath in files::find_dirs(path.as_ref()) {
+            if subpath.as_bytes().last() == Some(&b'/') {
+                // TODO Remove this once PathAuditor is certified
+                // as the only entrypoint for path data
+                return Err(DirstateMapError::ConsecutiveSlashes);
+            }
             if let Some(val) = self.inner.get_mut(subpath) {
                 *val += 1;
                 break;
             }
             self.inner.insert(subpath.to_owned(), 1);
         }
+        Ok(())
     }
 
     /// Decreases the count of deepest directory contained in the path.
@@ -82,9 +93,9 @@
     /// If the directory is not in the map, something horrible has happened.
     pub fn delete_path(
         &mut self,
-        path: &HgPath,
+        path: impl AsRef<HgPath>,
     ) -> Result<(), DirstateMapError> {
-        for subpath in files::find_dirs(path) {
+        for subpath in files::find_dirs(path.as_ref()) {
             match self.inner.entry(subpath.to_owned()) {
                 Entry::Occupied(mut entry) => {
                     let val = entry.get().clone();
@@ -96,7 +107,7 @@
                 }
                 Entry::Vacant(_) => {
                     return Err(DirstateMapError::PathNotFound(
-                        path.to_owned(),
+                        path.as_ref().to_owned(),
                     ))
                 }
             };
@@ -105,8 +116,8 @@
         Ok(())
     }
 
-    pub fn contains(&self, key: &HgPath) -> bool {
-        self.inner.contains_key(key)
+    pub fn contains(&self, key: impl AsRef<HgPath>) -> bool {
+        self.inner.contains_key(key.as_ref())
     }
 
     pub fn iter(&self) -> DirsMultisetIter {
@@ -121,11 +132,11 @@
 #[cfg(test)]
 mod tests {
     use super::*;
-    use std::collections::HashMap;
 
     #[test]
     fn test_delete_path_path_not_found() {
-        let mut map = DirsMultiset::from_manifest(&vec![]);
+        let manifest: Vec<HgPathBuf> = vec![];
+        let mut map = DirsMultiset::from_manifest(&manifest).unwrap();
         let path = HgPathBuf::from_bytes(b"doesnotexist/");
         assert_eq!(
             Err(DirstateMapError::PathNotFound(path.to_owned())),
@@ -135,7 +146,8 @@
 
     #[test]
     fn test_delete_path_empty_path() {
-        let mut map = DirsMultiset::from_manifest(&vec![HgPathBuf::new()]);
+        let mut map =
+            DirsMultiset::from_manifest(&vec![HgPathBuf::new()]).unwrap();
         let path = HgPath::new(b"");
         assert_eq!(Ok(()), map.delete_path(path));
         assert_eq!(
@@ -181,47 +193,49 @@
 
     #[test]
     fn test_add_path_empty_path() {
-        let mut map = DirsMultiset::from_manifest(&vec![]);
+        let manifest: Vec<HgPathBuf> = vec![];
+        let mut map = DirsMultiset::from_manifest(&manifest).unwrap();
         let path = HgPath::new(b"");
-        map.add_path(path);
+        map.add_path(path).unwrap();
 
         assert_eq!(1, map.len());
     }
 
     #[test]
     fn test_add_path_successful() {
-        let mut map = DirsMultiset::from_manifest(&vec![]);
+        let manifest: Vec<HgPathBuf> = vec![];
+        let mut map = DirsMultiset::from_manifest(&manifest).unwrap();
 
-        map.add_path(HgPath::new(b"a/"));
+        map.add_path(HgPath::new(b"a/")).unwrap();
         assert_eq!(1, *map.inner.get(HgPath::new(b"a")).unwrap());
         assert_eq!(1, *map.inner.get(HgPath::new(b"")).unwrap());
         assert_eq!(2, map.len());
 
         // Non directory should be ignored
-        map.add_path(HgPath::new(b"a"));
+        map.add_path(HgPath::new(b"a")).unwrap();
         assert_eq!(1, *map.inner.get(HgPath::new(b"a")).unwrap());
         assert_eq!(2, map.len());
 
         // Non directory will still add its base
-        map.add_path(HgPath::new(b"a/b"));
+        map.add_path(HgPath::new(b"a/b")).unwrap();
         assert_eq!(2, *map.inner.get(HgPath::new(b"a")).unwrap());
         assert_eq!(2, map.len());
 
         // Duplicate path works
-        map.add_path(HgPath::new(b"a/"));
+        map.add_path(HgPath::new(b"a/")).unwrap();
         assert_eq!(3, *map.inner.get(HgPath::new(b"a")).unwrap());
 
         // Nested dir adds to its base
-        map.add_path(HgPath::new(b"a/b/"));
+        map.add_path(HgPath::new(b"a/b/")).unwrap();
         assert_eq!(4, *map.inner.get(HgPath::new(b"a")).unwrap());
         assert_eq!(1, *map.inner.get(HgPath::new(b"a/b")).unwrap());
 
         // but not its base's base, because it already existed
-        map.add_path(HgPath::new(b"a/b/c/"));
+        map.add_path(HgPath::new(b"a/b/c/")).unwrap();
         assert_eq!(4, *map.inner.get(HgPath::new(b"a")).unwrap());
         assert_eq!(2, *map.inner.get(HgPath::new(b"a/b")).unwrap());
 
-        map.add_path(HgPath::new(b"a/c/"));
+        map.add_path(HgPath::new(b"a/c/")).unwrap();
         assert_eq!(1, *map.inner.get(HgPath::new(b"a/c")).unwrap());
 
         let expected = DirsMultiset {
@@ -235,22 +249,24 @@
 
     #[test]
     fn test_dirsmultiset_new_empty() {
-        let new = DirsMultiset::from_manifest(&vec![]);
+        let manifest: Vec<HgPathBuf> = vec![];
+        let new = DirsMultiset::from_manifest(&manifest).unwrap();
         let expected = DirsMultiset {
-            inner: HashMap::new(),
+            inner: FastHashMap::default(),
         };
         assert_eq!(expected, new);
 
-        let new = DirsMultiset::from_dirstate(&HashMap::new(), None);
+        let new = DirsMultiset::from_dirstate(&FastHashMap::default(), None)
+            .unwrap();
         let expected = DirsMultiset {
-            inner: HashMap::new(),
+            inner: FastHashMap::default(),
         };
         assert_eq!(expected, new);
     }
 
     #[test]
     fn test_dirsmultiset_new_no_skip() {
-        let input_vec = ["a/", "b/", "a/c", "a/d/"]
+        let input_vec: Vec<HgPathBuf> = ["a/", "b/", "a/c", "a/d/"]
             .iter()
             .map(|e| HgPathBuf::from_bytes(e.as_bytes()))
             .collect();
@@ -259,7 +275,7 @@
             .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
             .collect();
 
-        let new = DirsMultiset::from_manifest(&input_vec);
+        let new = DirsMultiset::from_manifest(&input_vec).unwrap();
         let expected = DirsMultiset {
             inner: expected_inner,
         };
@@ -284,7 +300,7 @@
             .map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
             .collect();
 
-        let new = DirsMultiset::from_dirstate(&input_map, None);
+        let new = DirsMultiset::from_dirstate(&input_map, None).unwrap();
         let expected = DirsMultiset {
             inner: expected_inner,
         };
@@ -320,7 +336,8 @@
             .collect();
 
         let new =
-            DirsMultiset::from_dirstate(&input_map, Some(EntryState::Normal));
+            DirsMultiset::from_dirstate(&input_map, Some(EntryState::Normal))
+                .unwrap();
         let expected = DirsMultiset {
             inner: expected_inner,
         };
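
Since from_manifest and add_path are now fallible, callers propagate
DirstateMapError instead of trusting every path. A minimal sketch of the new
calling convention, assuming the hg crate as laid out in this patch:

use hg::utils::hg_path::{HgPath, HgPathBuf};
use hg::{DirsMultiset, DirstateMapError};

fn main() -> Result<(), DirstateMapError> {
    let manifest = vec![
        HgPathBuf::from_bytes(b"a/b"),
        HgPathBuf::from_bytes(b"a/c"),
    ];
    // Both constructors now surface invalid paths (e.g. consecutive
    // slashes) instead of silently counting them.
    let mut dirs = DirsMultiset::from_manifest(&manifest)?;
    dirs.add_path(HgPath::new(b"a/d"))?;
    assert!(dirs.contains(HgPath::new(b"a")));
    Ok(())
}
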
--- a/rust/hg-core/src/dirstate/dirstate_map.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -5,26 +5,27 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use crate::utils::hg_path::{HgPath, HgPathBuf};
 use crate::{
-    dirstate::{parsers::PARENT_SIZE, EntryState},
+    dirstate::{parsers::PARENT_SIZE, EntryState, SIZE_FROM_OTHER_PARENT},
     pack_dirstate, parse_dirstate,
-    utils::files::normalize_case,
+    utils::{
+        files::normalize_case,
+        hg_path::{HgPath, HgPathBuf},
+    },
     CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError,
-    DirstateParents, DirstateParseError, StateMap,
+    DirstateParents, DirstateParseError, FastHashMap, StateMap,
 };
 use core::borrow::Borrow;
-use std::collections::{HashMap, HashSet};
+use std::collections::HashSet;
 use std::convert::TryInto;
 use std::iter::FromIterator;
 use std::ops::Deref;
 use std::time::Duration;
 
-pub type FileFoldMap = HashMap<HgPathBuf, HgPathBuf>;
+pub type FileFoldMap = FastHashMap<HgPathBuf, HgPathBuf>;
 
 const NULL_ID: [u8; 20] = [0; 20];
 const MTIME_UNSET: i32 = -1;
-const SIZE_DIRTY: i32 = -2;
 
 #[derive(Default)]
 pub struct DirstateMap {
@@ -82,16 +83,16 @@
         filename: &HgPath,
         old_state: EntryState,
         entry: DirstateEntry,
-    ) {
+    ) -> Result<(), DirstateMapError> {
         if old_state == EntryState::Unknown || old_state == EntryState::Removed
         {
             if let Some(ref mut dirs) = self.dirs {
-                dirs.add_path(filename)
+                dirs.add_path(filename)?;
             }
         }
         if old_state == EntryState::Unknown {
             if let Some(ref mut all_dirs) = self.all_dirs {
-                all_dirs.add_path(filename)
+                all_dirs.add_path(filename)?;
             }
         }
         self.state_map.insert(filename.to_owned(), entry.to_owned());
@@ -100,9 +101,10 @@
             self.non_normal_set.insert(filename.to_owned());
         }
 
-        if entry.size == SIZE_DIRTY {
+        if entry.size == SIZE_FROM_OTHER_PARENT {
             self.other_parent_set.insert(filename.to_owned());
         }
+        Ok(())
     }
 
     /// Mark a file as removed in the dirstate.
@@ -124,7 +126,7 @@
         }
         if old_state == EntryState::Unknown {
             if let Some(ref mut all_dirs) = self.all_dirs {
-                all_dirs.add_path(filename);
+                all_dirs.add_path(filename)?;
             }
         }
 
@@ -212,7 +214,8 @@
             if *state != EntryState::Normal || *mtime == MTIME_UNSET {
                 non_normal.insert(filename.to_owned());
             }
-            if *state == EntryState::Normal && *size == SIZE_DIRTY {
+            if *state == EntryState::Normal && *size == SIZE_FROM_OTHER_PARENT
+            {
                 other_parent.insert(filename.to_owned());
             }
         }
@@ -224,30 +227,38 @@
     /// emulate a Python lazy property, but it is ugly and unidiomatic.
     /// TODO One day, rewriting this struct using the typestate pattern
     /// might be a good idea.
-    pub fn set_all_dirs(&mut self) {
+    pub fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
         if self.all_dirs.is_none() {
             self.all_dirs =
-                Some(DirsMultiset::from_dirstate(&self.state_map, None));
+                Some(DirsMultiset::from_dirstate(&self.state_map, None)?);
         }
+        Ok(())
     }
 
-    pub fn set_dirs(&mut self) {
+    pub fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
         if self.dirs.is_none() {
             self.dirs = Some(DirsMultiset::from_dirstate(
                 &self.state_map,
                 Some(EntryState::Removed),
-            ));
+            )?);
         }
+        Ok(())
     }
 
-    pub fn has_tracked_dir(&mut self, directory: &HgPath) -> bool {
-        self.set_dirs();
-        self.dirs.as_ref().unwrap().contains(directory)
+    pub fn has_tracked_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError> {
+        self.set_dirs()?;
+        Ok(self.dirs.as_ref().unwrap().contains(directory))
     }
 
-    pub fn has_dir(&mut self, directory: &HgPath) -> bool {
-        self.set_all_dirs();
-        self.all_dirs.as_ref().unwrap().contains(directory)
+    pub fn has_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError> {
+        self.set_all_dirs()?;
+        Ok(self.all_dirs.as_ref().unwrap().contains(directory))
     }
 
     pub fn parents(
@@ -324,7 +335,7 @@
         if let Some(ref file_fold_map) = self.file_fold_map {
             return file_fold_map;
         }
-        let mut new_file_fold_map = FileFoldMap::new();
+        let mut new_file_fold_map = FileFoldMap::default();
         for (filename, DirstateEntry { state, .. }) in self.state_map.borrow()
         {
             if *state == EntryState::Removed {
@@ -347,11 +358,11 @@
         assert!(map.dirs.is_none());
         assert!(map.all_dirs.is_none());
 
-        assert_eq!(false, map.has_dir(HgPath::new(b"nope")));
+        assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
         assert!(map.all_dirs.is_some());
         assert!(map.dirs.is_none());
 
-        assert_eq!(false, map.has_tracked_dir(HgPath::new(b"nope")));
+        assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
         assert!(map.dirs.is_some());
     }
 
@@ -370,7 +381,8 @@
                 mtime: 1337,
                 size: 1337,
             },
-        );
+        )
+        .unwrap();
 
         assert_eq!(1, map.len());
         assert_eq!(0, map.non_normal_set.len());
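
The has_dir/has_tracked_dir changes follow the same pattern: the lazily built
multisets can now fail to construct, so the queries return Result. A short
sketch, assuming the hg crate from this patch:

use hg::utils::hg_path::HgPath;
use hg::{DirstateMap, DirstateMapError};

fn has_nested(map: &mut DirstateMap) -> Result<bool, DirstateMapError> {
    // The first call lazily builds `all_dirs` (which may now fail on
    // invalid paths); later calls reuse the cached multiset.
    map.has_dir(HgPath::new(b"some/dir"))
}
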
--- a/rust/hg-core/src/dirstate/parsers.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/dirstate/parsers.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -157,13 +157,12 @@
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::utils::hg_path::HgPathBuf;
-    use std::collections::HashMap;
+    use crate::{utils::hg_path::HgPathBuf, FastHashMap};
 
     #[test]
     fn test_pack_dirstate_empty() {
-        let mut state_map: StateMap = HashMap::new();
-        let copymap = HashMap::new();
+        let mut state_map: StateMap = FastHashMap::default();
+        let copymap = FastHashMap::default();
         let parents = DirstateParents {
             p1: *b"12345678910111213141",
             p2: *b"00000000000000000000",
@@ -194,7 +193,7 @@
         .collect();
         let mut state_map = expected_state_map.clone();
 
-        let copymap = HashMap::new();
+        let copymap = FastHashMap::default();
         let parents = DirstateParents {
             p1: *b"12345678910111213141",
             p2: *b"00000000000000000000",
@@ -230,7 +229,7 @@
         .cloned()
         .collect();
         let mut state_map = expected_state_map.clone();
-        let mut copymap = HashMap::new();
+        let mut copymap = FastHashMap::default();
         copymap.insert(
             HgPathBuf::from_bytes(b"f1"),
             HgPathBuf::from_bytes(b"copyname"),
@@ -270,7 +269,7 @@
         .iter()
         .cloned()
         .collect();
-        let mut copymap = HashMap::new();
+        let mut copymap = FastHashMap::default();
         copymap.insert(
             HgPathBuf::from_bytes(b"f1"),
             HgPathBuf::from_bytes(b"copyname"),
@@ -284,8 +283,8 @@
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
 
-        let mut new_state_map: StateMap = HashMap::new();
-        let mut new_copy_map: CopyMap = HashMap::new();
+        let mut new_state_map: StateMap = FastHashMap::default();
+        let mut new_copy_map: CopyMap = FastHashMap::default();
         let new_parents = parse_dirstate(
             &mut new_state_map,
             &mut new_copy_map,
@@ -341,7 +340,7 @@
         .iter()
         .cloned()
         .collect();
-        let mut copymap = HashMap::new();
+        let mut copymap = FastHashMap::default();
         copymap.insert(
             HgPathBuf::from_bytes(b"f1"),
             HgPathBuf::from_bytes(b"copyname"),
@@ -359,8 +358,8 @@
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
 
-        let mut new_state_map: StateMap = HashMap::new();
-        let mut new_copy_map: CopyMap = HashMap::new();
+        let mut new_state_map: StateMap = FastHashMap::default();
+        let mut new_copy_map: CopyMap = FastHashMap::default();
         let new_parents = parse_dirstate(
             &mut new_state_map,
             &mut new_copy_map,
@@ -388,7 +387,7 @@
         .iter()
         .cloned()
         .collect();
-        let mut copymap = HashMap::new();
+        let mut copymap = FastHashMap::default();
         copymap.insert(
             HgPathBuf::from_bytes(b"f1"),
             HgPathBuf::from_bytes(b"copyname"),
@@ -402,8 +401,8 @@
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
 
-        let mut new_state_map: StateMap = HashMap::new();
-        let mut new_copy_map: CopyMap = HashMap::new();
+        let mut new_state_map: StateMap = FastHashMap::default();
+        let mut new_copy_map: CopyMap = FastHashMap::default();
         let new_parents = parse_dirstate(
             &mut new_state_map,
             &mut new_copy_map,
--- a/rust/hg-core/src/dirstate/status.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/dirstate/status.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -9,131 +9,235 @@
 //! It is currently missing a lot of functionality compared to the Python one
 //! and will only be triggered in narrow cases.
 
-use crate::utils::files::HgMetadata;
-use crate::utils::hg_path::{hg_path_to_path_buf, HgPath, HgPathBuf};
-use crate::{DirstateEntry, DirstateMap, EntryState};
+use crate::{
+    dirstate::SIZE_FROM_OTHER_PARENT,
+    matchers::Matcher,
+    utils::{
+        files::HgMetadata,
+        hg_path::{hg_path_to_path_buf, HgPath},
+    },
+    CopyMap, DirstateEntry, DirstateMap, EntryState,
+};
 use rayon::prelude::*;
-use std::collections::HashMap;
-use std::fs::Metadata;
+use std::collections::HashSet;
 use std::path::Path;
 
+/// Marker enum used to dispatch new status entries into the right collections.
+/// It is similar to `crate::EntryState`, but represents the transient state
+/// of entries during the lifetime of a command.
+enum Dispatch {
+    Unsure,
+    Modified,
+    Added,
+    Removed,
+    Deleted,
+    Clean,
+    Unknown,
+}
+
+type IoResult<T> = std::io::Result<T>;
+
+/// Dates and times that are outside the 31-bit signed range are compared
+/// modulo 2^31. This should prevent hg from behaving badly with very large
+/// files or corrupt dates while still having a high probability of detecting
+/// changes. (issue2608)
+/// TODO I haven't found a way of having `b` be `Into<i32>`, since `From<u64>`
+/// is not defined for `i32`, and there is no `As` trait. This forces the
+/// caller to cast `b` as `i32`.
+fn mod_compare(a: i32, b: i32) -> bool {
+    a & i32::max_value() != b & i32::max_value()
+}
+
+/// The file corresponding to the dirstate entry was found on the filesystem.
+fn dispatch_found(
+    filename: impl AsRef<HgPath>,
+    entry: DirstateEntry,
+    metadata: HgMetadata,
+    copy_map: &CopyMap,
+    check_exec: bool,
+    list_clean: bool,
+    last_normal_time: i64,
+) -> Dispatch {
+    let DirstateEntry {
+        state,
+        mode,
+        mtime,
+        size,
+    } = entry;
+
+    let HgMetadata {
+        st_mode,
+        st_size,
+        st_mtime,
+        ..
+    } = metadata;
+
+    match state {
+        EntryState::Normal => {
+            let size_changed = mod_compare(size, st_size as i32);
+            let mode_changed =
+                (mode ^ st_mode as i32) & 0o100 != 0o000 && check_exec;
+            let metadata_changed = size >= 0 && (size_changed || mode_changed);
+            let other_parent = size == SIZE_FROM_OTHER_PARENT;
+            if metadata_changed
+                || other_parent
+                || copy_map.contains_key(filename.as_ref())
+            {
+                Dispatch::Modified
+            } else if mod_compare(mtime, st_mtime as i32) {
+                Dispatch::Unsure
+            } else if st_mtime == last_normal_time {
+                // the file may have just been marked as normal and
+                // it may have changed in the same second without
+                // changing its size. This can happen if we quickly
+                // do multiple commits. Force lookup, so we don't
+                // miss such a racy file change.
+                Dispatch::Unsure
+            } else if list_clean {
+                Dispatch::Clean
+            } else {
+                Dispatch::Unknown
+            }
+        }
+        EntryState::Merged => Dispatch::Modified,
+        EntryState::Added => Dispatch::Added,
+        EntryState::Removed => Dispatch::Removed,
+        EntryState::Unknown => Dispatch::Unknown,
+    }
+}
+
+/// The file corresponding to this Dirstate entry is missing.
+fn dispatch_missing(state: EntryState) -> Dispatch {
+    match state {
+        // File was removed from the filesystem during commands
+        EntryState::Normal | EntryState::Merged | EntryState::Added => {
+            Dispatch::Deleted
+        }
+        // File was removed, everything is normal
+        EntryState::Removed => Dispatch::Removed,
+        // File is unknown to Mercurial, everything is normal
+        EntryState::Unknown => Dispatch::Unknown,
+    }
+}
+
 /// Get stat data about the files explicitly specified by match.
 /// TODO subrepos
-fn walk_explicit(
-    files: &[impl AsRef<HgPath> + Sync],
-    dmap: &DirstateMap,
-    root_dir: impl AsRef<Path> + Sync,
-) -> std::io::Result<HashMap<HgPathBuf, Option<HgMetadata>>> {
-    let mut results = HashMap::new();
-
-    // A tuple of the normalized filename and the `Result` of the call to
-    // `symlink_metadata` for separate handling.
-    type WalkTuple<'a> = (&'a HgPath, std::io::Result<Metadata>);
+fn walk_explicit<'a>(
+    files: &'a HashSet<&HgPath>,
+    dmap: &'a DirstateMap,
+    root_dir: impl AsRef<Path> + Sync + Send,
+    check_exec: bool,
+    list_clean: bool,
+    last_normal_time: i64,
+) -> impl ParallelIterator<Item = IoResult<(&'a HgPath, Dispatch)>> {
+    files.par_iter().filter_map(move |filename| {
+        // TODO normalization
+        let normalized = filename.as_ref();
 
-    let stats_res: std::io::Result<Vec<WalkTuple>> = files
-        .par_iter()
-        .map(|filename| {
-            // TODO normalization
-            let normalized = filename.as_ref();
-
-            let target_filename =
-                root_dir.as_ref().join(hg_path_to_path_buf(normalized)?);
-
-            Ok((normalized, target_filename.symlink_metadata()))
-        })
-        .collect();
-
-    for res in stats_res? {
-        match res {
-            (normalized, Ok(stat)) => {
-                if stat.is_file() {
-                    results.insert(
-                        normalized.to_owned(),
-                        Some(HgMetadata::from_metadata(stat)),
-                    );
+        let buf = match hg_path_to_path_buf(normalized) {
+            Ok(x) => x,
+            Err(e) => return Some(Err(e.into())),
+        };
+        let target = root_dir.as_ref().join(buf);
+        let st = target.symlink_metadata();
+        match st {
+            Ok(meta) => {
+                let file_type = meta.file_type();
+                if file_type.is_file() || file_type.is_symlink() {
+                    if let Some(entry) = dmap.get(normalized) {
+                        return Some(Ok((
+                            normalized,
+                            dispatch_found(
+                                &normalized,
+                                *entry,
+                                HgMetadata::from_metadata(meta),
+                                &dmap.copy_map,
+                                check_exec,
+                                list_clean,
+                                last_normal_time,
+                            ),
+                        )));
+                    }
                 } else {
                     if dmap.contains_key(normalized) {
-                        results.insert(normalized.to_owned(), None);
+                        return Some(Ok((normalized, Dispatch::Removed)));
                     }
                 }
             }
-            (normalized, Err(_)) => {
-                if dmap.contains_key(normalized) {
-                    results.insert(normalized.to_owned(), None);
+            Err(_) => {
+                if let Some(entry) = dmap.get(normalized) {
+                    return Some(Ok((
+                        normalized,
+                        dispatch_missing(entry.state),
+                    )));
                 }
             }
         };
-    }
-
-    Ok(results)
+        None
+    })
 }
 
-// Stat all entries in the `DirstateMap` and return their new metadata.
-pub fn stat_dmap_entries(
+/// Stat all entries in the `DirstateMap` and mark them for dispatch into
+/// the relevant collections.
+fn stat_dmap_entries(
     dmap: &DirstateMap,
-    results: &HashMap<HgPathBuf, Option<HgMetadata>>,
-    root_dir: impl AsRef<Path> + Sync,
-) -> std::io::Result<Vec<(HgPathBuf, Option<HgMetadata>)>> {
-    dmap.par_iter()
-        .filter_map(
-            // Getting file metadata is costly, so we don't do it if the
-            // file is already present in the results, hence `filter_map`
-            |(filename, _)| -> Option<
-                std::io::Result<(HgPathBuf, Option<HgMetadata>)>
-            > {
-                if results.contains_key(filename) {
-                    return None;
-                }
-                let meta = match hg_path_to_path_buf(filename) {
-                    Ok(p) => root_dir.as_ref().join(p).symlink_metadata(),
-                    Err(e) => return Some(Err(e.into())),
-                };
+    root_dir: impl AsRef<Path> + Sync + Send,
+    check_exec: bool,
+    list_clean: bool,
+    last_normal_time: i64,
+) -> impl ParallelIterator<Item = IoResult<(&HgPath, Dispatch)>> {
+    dmap.par_iter().map(move |(filename, entry)| {
+        let filename: &HgPath = filename;
+        let filename_as_path = hg_path_to_path_buf(filename)?;
+        let meta = root_dir.as_ref().join(filename_as_path).symlink_metadata();
 
-                Some(match meta {
-                    Ok(ref m)
-                        if !(m.file_type().is_file()
-                            || m.file_type().is_symlink()) =>
-                    {
-                        Ok((filename.to_owned(), None))
-                    }
-                    Ok(m) => Ok((
-                        filename.to_owned(),
-                        Some(HgMetadata::from_metadata(m)),
-                    )),
-                    Err(ref e)
-                        if e.kind() == std::io::ErrorKind::NotFound
-                            || e.raw_os_error() == Some(20) =>
-                    {
-                        // Rust does not yet have an `ErrorKind` for
-                        // `NotADirectory` (errno 20)
-                        // It happens if the dirstate contains `foo/bar` and
-                        // foo is not a directory
-                        Ok((filename.to_owned(), None))
-                    }
-                    Err(e) => Err(e),
-                })
-            },
-        )
-        .collect()
+        match meta {
+            Ok(ref m)
+                if !(m.file_type().is_file()
+                    || m.file_type().is_symlink()) =>
+            {
+                Ok((filename, dispatch_missing(entry.state)))
+            }
+            Ok(m) => Ok((
+                filename,
+                dispatch_found(
+                    filename,
+                    *entry,
+                    HgMetadata::from_metadata(m),
+                    &dmap.copy_map,
+                    check_exec,
+                    list_clean,
+                    last_normal_time,
+                ),
+            )),
+            Err(ref e)
+                if e.kind() == std::io::ErrorKind::NotFound
+                    || e.raw_os_error() == Some(20) =>
+            {
+                // Rust does not yet have an `ErrorKind` for
+                // `NotADirectory` (errno 20)
+                // It happens if the dirstate contains `foo/bar` and
+                // foo is not a directory
+                Ok((filename, dispatch_missing(entry.state)))
+            }
+            Err(e) => Err(e),
+        }
+    })
 }
 
-pub struct StatusResult {
-    pub modified: Vec<HgPathBuf>,
-    pub added: Vec<HgPathBuf>,
-    pub removed: Vec<HgPathBuf>,
-    pub deleted: Vec<HgPathBuf>,
-    pub clean: Vec<HgPathBuf>,
-    // TODO ignored
-    // TODO unknown
+pub struct StatusResult<'a> {
+    pub modified: Vec<&'a HgPath>,
+    pub added: Vec<&'a HgPath>,
+    pub removed: Vec<&'a HgPath>,
+    pub deleted: Vec<&'a HgPath>,
+    pub clean: Vec<&'a HgPath>,
+    /* TODO ignored
+     * TODO unknown */
 }
 
-fn build_response(
-    dmap: &DirstateMap,
-    list_clean: bool,
-    last_normal_time: i64,
-    check_exec: bool,
-    results: HashMap<HgPathBuf, Option<HgMetadata>>,
-) -> (Vec<HgPathBuf>, StatusResult) {
+fn build_response<'a>(
+    results: impl IntoIterator<Item = IoResult<(&'a HgPath, Dispatch)>>,
+) -> IoResult<(Vec<&'a HgPath>, StatusResult<'a>)> {
     let mut lookup = vec![];
     let mut modified = vec![];
     let mut added = vec![];
@@ -141,80 +245,20 @@
     let mut deleted = vec![];
     let mut clean = vec![];
 
-    for (filename, metadata_option) in results.into_iter() {
-        let DirstateEntry {
-            state,
-            mode,
-            mtime,
-            size,
-        } = match dmap.get(&filename) {
-            None => {
-                continue;
-            }
-            Some(e) => *e,
-        };
-
-        match metadata_option {
-            None => {
-                match state {
-                    EntryState::Normal
-                    | EntryState::Merged
-                    | EntryState::Added => deleted.push(filename),
-                    EntryState::Removed => removed.push(filename),
-                    _ => {}
-                };
-            }
-            Some(HgMetadata {
-                st_mode,
-                st_size,
-                st_mtime,
-                ..
-            }) => {
-                match state {
-                    EntryState::Normal => {
-                        // Dates and times that are outside the 31-bit signed
-                        // range are compared modulo 2^31. This should prevent
-                        // it from behaving badly with very large files or
-                        // corrupt dates while still having a high probability
-                        // of detecting changes. (issue2608)
-                        let range_mask = 0x7fffffff;
-
-                        let size_changed = (size != st_size as i32)
-                            && size != (st_size as i32 & range_mask);
-                        let mode_changed = (mode ^ st_mode as i32) & 0o100
-                            != 0o000
-                            && check_exec;
-                        if size >= 0
-                            && (size_changed || mode_changed)
-                            || size == -2  // other parent
-                            || dmap.copy_map.contains_key(&filename)
-                        {
-                            modified.push(filename);
-                        } else if mtime != st_mtime as i32
-                            && mtime != (st_mtime as i32 & range_mask)
-                        {
-                            lookup.push(filename);
-                        } else if st_mtime == last_normal_time {
-                            // the file may have just been marked as normal and
-                            // it may have changed in the same second without
-                            // changing its size. This can happen if we quickly
-                            // do multiple commits. Force lookup, so we don't
-                            // miss such a racy file change.
-                            lookup.push(filename);
-                        } else if list_clean {
-                            clean.push(filename);
-                        }
-                    }
-                    EntryState::Merged => modified.push(filename),
-                    EntryState::Added => added.push(filename),
-                    EntryState::Removed => removed.push(filename),
-                    EntryState::Unknown => {}
-                }
-            }
+    for res in results.into_iter() {
+        let (filename, dispatch) = res?;
+        match dispatch {
+            Dispatch::Unknown => {}
+            Dispatch::Unsure => lookup.push(filename),
+            Dispatch::Modified => modified.push(filename),
+            Dispatch::Added => added.push(filename),
+            Dispatch::Removed => removed.push(filename),
+            Dispatch::Deleted => deleted.push(filename),
+            Dispatch::Clean => clean.push(filename),
         }
     }
 
-    (
+    Ok((
         lookup,
         StatusResult {
             modified,
@@ -223,26 +267,40 @@
             deleted,
             clean,
         },
-    )
+    ))
 }
 
-pub fn status(
-    dmap: &DirstateMap,
-    root_dir: impl AsRef<Path> + Sync + Copy,
-    files: &[impl AsRef<HgPath> + Sync],
+pub fn status<'a: 'c, 'b: 'c, 'c>(
+    dmap: &'a DirstateMap,
+    matcher: &'b (impl Matcher),
+    root_dir: impl AsRef<Path> + Sync + Send + Copy,
     list_clean: bool,
     last_normal_time: i64,
     check_exec: bool,
-) -> std::io::Result<(Vec<HgPathBuf>, StatusResult)> {
-    let mut results = walk_explicit(files, &dmap, root_dir)?;
-
-    results.extend(stat_dmap_entries(&dmap, &results, root_dir)?);
+) -> IoResult<(Vec<&'c HgPath>, StatusResult<'c>)> {
+    let files = matcher.file_set();
+    let mut results = vec![];
+    if let Some(files) = files {
+        results.par_extend(walk_explicit(
+            &files,
+            &dmap,
+            root_dir,
+            check_exec,
+            list_clean,
+            last_normal_time,
+        ));
+    }
 
-    Ok(build_response(
-        &dmap,
-        list_clean,
-        last_normal_time,
-        check_exec,
-        results,
-    ))
+    if !matcher.is_exact() {
+        let stat_results = stat_dmap_entries(
+            &dmap,
+            root_dir,
+            check_exec,
+            list_clean,
+            last_normal_time,
+        );
+        results.par_extend(stat_results);
+    }
+
+    build_response(results)
 }
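
Because mod_compare is easy to misread, here is a self-contained sketch of
the rule it implements: both operands are masked to their low 31 bits, so
values differing only in the sign bit (for example a large size truncated to
32 bits) compare equal, while genuine changes are still detected:

fn mod_compare(a: i32, b: i32) -> bool {
    a & i32::max_value() != b & i32::max_value()
}

fn main() {
    let recorded: i32 = 5;
    // A 64-bit size that wrapped when cast down to i32.
    let on_disk = (5i64 + (1i64 << 31)) as i32;
    assert!(!mod_compare(recorded, on_disk)); // treated as unchanged
    assert!(mod_compare(recorded, 6)); // a real change is still caught
}
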
--- a/rust/hg-core/src/discovery.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/discovery.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -11,21 +11,21 @@
 //! `mercurial.setdiscovery`
 
 use super::{Graph, GraphError, Revision, NULL_REVISION};
-use crate::ancestors::MissingAncestors;
-use crate::dagops;
+use crate::{ancestors::MissingAncestors, dagops, FastHashMap};
 use rand::seq::SliceRandom;
 use rand::{thread_rng, RngCore, SeedableRng};
 use std::cmp::{max, min};
-use std::collections::{HashMap, HashSet, VecDeque};
+use std::collections::{HashSet, VecDeque};
 
 type Rng = rand_pcg::Pcg32;
+type Seed = [u8; 16];
 
 pub struct PartialDiscovery<G: Graph + Clone> {
     target_heads: Option<Vec<Revision>>,
     graph: G, // plays the role of self._repo
     common: MissingAncestors<G>,
     undecided: Option<HashSet<Revision>>,
-    children_cache: Option<HashMap<Revision, Vec<Revision>>>,
+    children_cache: Option<FastHashMap<Revision, Vec<Revision>>>,
     missing: HashSet<Revision>,
     rng: Rng,
     respect_size: bool,
@@ -61,7 +61,7 @@
 where
     I: Iterator<Item = Revision>,
 {
-    let mut distances: HashMap<Revision, u32> = HashMap::new();
+    let mut distances: FastHashMap<Revision, u32> = FastHashMap::default();
     let mut visit: VecDeque<Revision> = heads.into_iter().collect();
     let mut factor: u32 = 1;
     let mut seen: HashSet<Revision> = HashSet::new();
@@ -159,7 +159,7 @@
         respect_size: bool,
         randomize: bool,
     ) -> Self {
-        let mut seed: [u8; 16] = [0; 16];
+        let mut seed = [0; 16];
         if randomize {
             thread_rng().fill_bytes(&mut seed);
         }
@@ -169,7 +169,7 @@
     pub fn new_with_seed(
         graph: G,
         target_heads: Vec<Revision>,
-        seed: [u8; 16],
+        seed: Seed,
         respect_size: bool,
         randomize: bool,
     ) -> Self {
@@ -328,7 +328,8 @@
         }
         self.ensure_undecided()?;
 
-        let mut children: HashMap<Revision, Vec<Revision>> = HashMap::new();
+        let mut children: FastHashMap<Revision, Vec<Revision>> =
+            FastHashMap::default();
         for &rev in self.undecided.as_ref().unwrap() {
             for p in ParentsIterator::graph_parents(&self.graph, rev)? {
                 children.entry(p).or_insert_with(|| Vec::new()).push(rev);
--- a/rust/hg-core/src/filepatterns.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/filepatterns.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -7,10 +7,11 @@
 
 //! Handling of Mercurial-specific patterns.
 
-use crate::{utils::SliceExt, LineNumber, PatternError, PatternFileError};
+use crate::{
+    utils::SliceExt, FastHashMap, LineNumber, PatternError, PatternFileError,
+};
 use lazy_static::lazy_static;
 use regex::bytes::{NoExpand, Regex};
-use std::collections::HashMap;
 use std::fs::File;
 use std::io::Read;
 use std::path::{Path, PathBuf};
@@ -214,8 +215,8 @@
 }
 
 lazy_static! {
-    static ref SYNTAXES: HashMap<&'static [u8], &'static [u8]> = {
-        let mut m = HashMap::new();
+    static ref SYNTAXES: FastHashMap<&'static [u8], &'static [u8]> = {
+        let mut m = FastHashMap::default();
 
         m.insert(b"re".as_ref(), b"relre:".as_ref());
         m.insert(b"regexp".as_ref(), b"relre:".as_ref());
--- a/rust/hg-core/src/lib.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/lib.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -1,4 +1,5 @@
-// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
+// Copyright 2018-2020 Georges Racinet <georges.racinet@octobus.net>
+//           and Mercurial contributors
 //
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
@@ -12,51 +13,29 @@
     dirs_multiset::{DirsMultiset, DirsMultisetIter},
     dirstate_map::DirstateMap,
     parsers::{pack_dirstate, parse_dirstate, PARENT_SIZE},
-    status::status,
+    status::{status, StatusResult},
     CopyMap, CopyMapIter, DirstateEntry, DirstateParents, EntryState,
     StateMap, StateMapIter,
 };
 mod filepatterns;
+pub mod matchers;
+pub mod revlog;
+pub use revlog::*;
 pub mod utils;
 
 use crate::utils::hg_path::HgPathBuf;
 pub use filepatterns::{
     build_single_regex, read_pattern_file, PatternSyntax, PatternTuple,
 };
-
-/// Mercurial revision numbers
-///
-/// As noted in revlog.c, revision numbers are actually encoded in
-/// 4 bytes, and are liberally converted to ints, whence the i32
-pub type Revision = i32;
-
-/// Marker expressing the absence of a parent
-///
-/// Independently of the actual representation, `NULL_REVISION` is guaranteed
-/// to be smaller that all existing revisions.
-pub const NULL_REVISION: Revision = -1;
-
-/// Same as `mercurial.node.wdirrev`
-///
-/// This is also equal to `i32::max_value()`, but it's better to spell
-/// it out explicitely, same as in `mercurial.node`
-pub const WORKING_DIRECTORY_REVISION: Revision = 0x7fffffff;
-
-/// The simplest expression of what we need of Mercurial DAGs.
-pub trait Graph {
-    /// Return the two parents of the given `Revision`.
-    ///
-    /// Each of the parents can be independently `NULL_REVISION`
-    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError>;
-}
+use std::collections::HashMap;
+use twox_hash::RandomXxHashBuilder64;
 
 pub type LineNumber = usize;
 
-#[derive(Clone, Debug, PartialEq)]
-pub enum GraphError {
-    ParentOutOfRange(Revision),
-    WorkingDirectoryUnsupported,
-}
+/// Rust's default hasher is too slow because it tries to prevent collision
+/// attacks. We are not concerned about those: if an ill-minded person has
+/// write access to your repository, you have other issues.
+pub type FastHashMap<K, V> = HashMap<K, V, RandomXxHashBuilder64>;
 
 #[derive(Clone, Debug, PartialEq)]
 pub enum DirstateParseError {
@@ -100,6 +79,20 @@
 pub enum DirstateMapError {
     PathNotFound(HgPathBuf),
     EmptyPath,
+    ConsecutiveSlashes,
+}
+
+impl ToString for DirstateMapError {
+    fn to_string(&self) -> String {
+        use crate::DirstateMapError::*;
+        match self {
+            PathNotFound(_) => "path not found in dirstate".to_string(),
+            EmptyPath => "found an empty path in dirstate".to_string(),
+            ConsecutiveSlashes => {
+                "found invalid consecutive slashes in path".to_string()
+            }
+        }
+    }
 }
 
 pub enum DirstateError {
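
One practical consequence of the alias: HashMap::new() is only defined for
the default hasher state, which is why every HashMap::new() in this series
becomes FastHashMap::default(). A minimal sketch:

use hg::FastHashMap;

fn main() {
    // default() builds the map with the xxHash-based hasher state.
    let mut m: FastHashMap<&str, u32> = FastHashMap::default();
    m.insert("a", 1);
    assert_eq!(m.get("a"), Some(&1));
}
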
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/matchers.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,179 @@
+// matchers.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Structs and types for matching files and directories.
+
+use crate::{utils::hg_path::HgPath, DirsMultiset, DirstateMapError};
+use std::collections::HashSet;
+use std::iter::FromIterator;
+
+pub enum VisitChildrenSet<'a> {
+    /// Don't visit anything
+    Empty,
+    /// Only visit this directory
+    This,
+    /// Visit this directory and these subdirectories
+    /// TODO Should we implement a `NonEmptyHashSet`?
+    Set(HashSet<&'a HgPath>),
+    /// Visit this directory and all subdirectories
+    Recursive,
+}
+
+pub trait Matcher {
+    /// Explicitly listed files
+    fn file_set(&self) -> Option<&HashSet<&HgPath>>;
+    /// Returns whether `filename` is in `file_set`
+    fn exact_match(&self, filename: impl AsRef<HgPath>) -> bool;
+    /// Returns whether `filename` is matched by this matcher
+    fn matches(&self, filename: impl AsRef<HgPath>) -> bool;
+    /// Decides whether a directory should be visited based on whether it
+    /// has potential matches in it or one of its subdirectories, and
+    /// potentially lists which subdirectories of that directory should be
+    /// visited. This is based on the match's primary, included, and excluded
+    /// patterns.
+    ///
+    /// # Example
+    ///
+    /// Assuming matchers `['path:foo/bar', 'rootfilesin:qux']`, we would
+    /// return the following values (provided the implementation of
+    /// `visit_children_set` is capable of recognizing this; some
+    /// implementations are not).
+    ///
+    /// ```text
+    /// '' -> {'foo', 'qux'}
+    /// 'baz' -> set()
+    /// 'foo' -> {'bar'}
+    /// // Ideally this would be `Recursive`, but since the prefix nature of
+    /// // matchers is applied to the entire matcher, we have to downgrade this
+    /// // to `This` due to the (yet to be implemented in Rust) non-prefix
+    /// // `RootFilesIn`-kind matcher being mixed in.
+    /// 'foo/bar' -> 'this'
+    /// 'qux' -> 'this'
+    /// ```
+    /// # Important
+    ///
+    /// Most matchers do not know if they're representing files or
+    /// directories. They see `['path:dir/f']` and don't know whether `f` is a
+    /// file or a directory, so `visit_children_set('dir')` for most matchers
+    /// will return `HashSet{ HgPath { "f" } }`, but if the matcher knows it's
+    /// a file (like the `ExactMatcher`, yet to be implemented in Rust, does),
+    /// it may return `VisitChildrenSet::This`.
+    ///
+    /// Do not rely on a returned `HashSet` meaning that there are no files in
+    /// this directory to investigate (or, equivalently, that if there are
+    /// files to investigate in 'dir', this will always return
+    /// `VisitChildrenSet::This`).
+    fn visit_children_set(
+        &self,
+        directory: impl AsRef<HgPath>,
+    ) -> VisitChildrenSet;
+    /// Matcher will match everything and `file_set()` will be empty:
+    /// optimization might be possible.
+    fn matches_everything(&self) -> bool;
+    /// Matcher will match exactly the files in `file_set()`: optimization
+    /// might be possible.
+    fn is_exact(&self) -> bool;
+}
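A rough sketch of how a traversal might consume `visit_children_set`; the `walk` helper is illustrative only and not part of this change:

```
fn walk(matcher: &impl Matcher, dir: &HgPath) {
    match matcher.visit_children_set(dir) {
        // Nothing can match under `dir`: prune the whole subtree.
        VisitChildrenSet::Empty => {}
        // Only `dir` itself is worth examining, not its subdirectories.
        VisitChildrenSet::This => { /* stat the files in `dir` */ }
        // Visit `dir` and descend only into the listed subdirectories.
        VisitChildrenSet::Set(subdirs) => {
            for subdir in subdirs {
                let _ = subdir; // recurse into `dir`/`subdir`
            }
        }
        // Visit `dir` and everything below it.
        VisitChildrenSet::Recursive => { /* recurse into all children */ }
    }
}
```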
+
+/// Matches everything.
+/// ```
+/// use hg::{ matchers::{Matcher, AlwaysMatcher}, utils::hg_path::HgPath };
+///
+/// let matcher = AlwaysMatcher;
+///
+/// assert_eq!(matcher.matches(HgPath::new(b"whatever")), true);
+/// assert_eq!(matcher.matches(HgPath::new(b"b.txt")), true);
+/// assert_eq!(matcher.matches(HgPath::new(b"main.c")), true);
+/// assert_eq!(matcher.matches(HgPath::new(br"re:.*\.c$")), true);
+/// ```
+#[derive(Debug)]
+pub struct AlwaysMatcher;
+
+impl Matcher for AlwaysMatcher {
+    fn file_set(&self) -> Option<&HashSet<&HgPath>> {
+        None
+    }
+    fn exact_match(&self, _filename: impl AsRef<HgPath>) -> bool {
+        false
+    }
+    fn matches(&self, _filename: impl AsRef<HgPath>) -> bool {
+        true
+    }
+    fn visit_children_set(
+        &self,
+        _directory: impl AsRef<HgPath>,
+    ) -> VisitChildrenSet {
+        VisitChildrenSet::Recursive
+    }
+    fn matches_everything(&self) -> bool {
+        true
+    }
+    fn is_exact(&self) -> bool {
+        false
+    }
+}
+
+/// Matches the input files exactly. They are interpreted as paths, not
+/// patterns.
+///
+/// ```
+/// use hg::{ matchers::{Matcher, FileMatcher}, utils::hg_path::HgPath };
+///
+/// let files = [HgPath::new(b"a.txt"), HgPath::new(br"re:.*\.c$")];
+/// let matcher = FileMatcher::new(&files).unwrap();
+///
+/// assert_eq!(matcher.matches(HgPath::new(b"a.txt")), true);
+/// assert_eq!(matcher.matches(HgPath::new(b"b.txt")), false);
+/// assert_eq!(matcher.matches(HgPath::new(b"main.c")), false);
+/// assert_eq!(matcher.matches(HgPath::new(br"re:.*\.c$")), true);
+/// ```
+#[derive(Debug)]
+pub struct FileMatcher<'a> {
+    files: HashSet<&'a HgPath>,
+    dirs: DirsMultiset,
+}
+
+impl<'a> FileMatcher<'a> {
+    pub fn new(
+        files: &'a [impl AsRef<HgPath>],
+    ) -> Result<Self, DirstateMapError> {
+        Ok(Self {
+            files: HashSet::from_iter(files.iter().map(|f| f.as_ref())),
+            dirs: DirsMultiset::from_manifest(files)?,
+        })
+    }
+    fn inner_matches(&self, filename: impl AsRef<HgPath>) -> bool {
+        self.files.contains(filename.as_ref())
+    }
+}
+
+impl<'a> Matcher for FileMatcher<'a> {
+    fn file_set(&self) -> Option<&HashSet<&HgPath>> {
+        Some(&self.files)
+    }
+    fn exact_match(&self, filename: impl AsRef<HgPath>) -> bool {
+        self.inner_matches(filename)
+    }
+    fn matches(&self, filename: impl AsRef<HgPath>) -> bool {
+        self.inner_matches(filename)
+    }
+    fn visit_children_set(
+        &self,
+        _directory: impl AsRef<HgPath>,
+    ) -> VisitChildrenSet {
+        // TODO implement once we have `status.traverse`
+        // This is useless until unknown files are taken into account
+        // Which will not need to happen before the `IncludeMatcher`.
+        unimplemented!()
+    }
+    fn matches_everything(&self) -> bool {
+        false
+    }
+    fn is_exact(&self) -> bool {
+        true
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,38 @@
+// Copyright 2018-2020 Georges Racinet <georges.racinet@octobus.net>
+//           and Mercurial contributors
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+//! Mercurial concepts for handling revision history
+
+/// Mercurial revision numbers
+///
+/// As noted in revlog.c, revision numbers are actually encoded in
+/// 4 bytes, and are liberally converted to ints, whence the i32
+pub type Revision = i32;
+
+/// Marker expressing the absence of a parent
+///
+/// Independently of the actual representation, `NULL_REVISION` is guaranteed
+/// to be smaller than all existing revisions.
+pub const NULL_REVISION: Revision = -1;
+
+/// Same as `mercurial.node.wdirrev`
+///
+/// This is also equal to `i32::max_value()`, but it's better to spell
+/// it out explicitly, same as in `mercurial.node`
+pub const WORKING_DIRECTORY_REVISION: Revision = 0x7fffffff;
+
+/// The simplest expression of what we need of Mercurial DAGs.
+pub trait Graph {
+    /// Return the two parents of the given `Revision`.
+    ///
+    /// Each of the parents can be independently `NULL_REVISION`
+    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError>;
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum GraphError {
+    ParentOutOfRange(Revision),
+    WorkingDirectoryUnsupported,
+}
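For illustration, a minimal vector-backed `Graph`; `VecGraph` is a sketch in the spirit of a test fixture, not part of this change (the `use` line matches how `hg-cpython` imports these items below):

```
use hg::{Graph, GraphError, Revision, NULL_REVISION};

struct VecGraph(Vec<[Revision; 2]>);

impl Graph for VecGraph {
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
        // A negative `rev` (including NULL_REVISION) wraps to a huge
        // usize under `as`, so it falls through to the error case.
        self.0
            .get(rev as usize)
            .copied()
            .ok_or(GraphError::ParentOutOfRange(rev))
    }
}

fn main() {
    // Revision 2 merges revisions 0 and 1, both of which are roots.
    let graph = VecGraph(vec![
        [NULL_REVISION, NULL_REVISION],
        [NULL_REVISION, NULL_REVISION],
        [0, 1],
    ]);
    assert_eq!(graph.parents(2), Ok([0, 1]));
}
```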
--- a/rust/hg-core/src/utils.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/utils.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -10,6 +10,26 @@
 pub mod files;
 pub mod hg_path;
 
+/// Useful until rust/issues/56345 is stable
+///
+/// # Examples
+///
+/// ```
+/// use crate::hg::utils::find_slice_in_slice;
+///
+/// let haystack = b"This is the haystack".to_vec();
+/// assert_eq!(find_slice_in_slice(&haystack, b"the"), Some(8));
+/// assert_eq!(find_slice_in_slice(&haystack, b"not here"), None);
+/// ```
+pub fn find_slice_in_slice<T>(slice: &[T], needle: &[T]) -> Option<usize>
+where
+    for<'a> &'a [T]: PartialEq,
+{
+    slice
+        .windows(needle.len())
+        .position(|window| window == needle)
+}
+
 /// Replaces the `from` slice with the `to` slice inside the `buf` slice.
 ///
 /// # Examples
--- a/rust/hg-core/src/utils/files.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/utils/files.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -127,7 +127,7 @@
 
     #[test]
     fn find_dirs_empty() {
-        // looks weird, but mercurial.util.finddirs(b"") yields b""
+        // looks weird, but mercurial.pathutil.finddirs(b"") yields b""
         let mut dirs = super::find_dirs(HgPath::new(b""));
         assert_eq!(dirs.next(), Some(HgPath::new(b"")));
         assert_eq!(dirs.next(), None);
--- a/rust/hg-core/src/utils/hg_path.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-core/src/utils/hg_path.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -7,6 +7,7 @@
 
 use std::borrow::Borrow;
 use std::ffi::{OsStr, OsString};
+use std::fmt;
 use std::ops::Deref;
 use std::path::{Path, PathBuf};
 
@@ -76,7 +77,7 @@
 // `#[repr(transparent)]`.
 // Anyway, `Slice` representation and layout are considered implementation
 // detail, are not documented and must not be relied upon.
-#[derive(Eq, Ord, PartialEq, PartialOrd, Debug, Hash)]
+#[derive(Eq, Ord, PartialEq, PartialOrd, Hash)]
 pub struct HgPath {
     inner: [u8],
 }
@@ -111,6 +112,9 @@
     pub fn contains(&self, other: u8) -> bool {
         self.inner.contains(&other)
     }
+    pub fn starts_with(&self, needle: impl AsRef<HgPath>) -> bool {
+        self.inner.starts_with(needle.as_ref().as_bytes())
+    }
     pub fn join<T: ?Sized + AsRef<HgPath>>(&self, other: &T) -> HgPathBuf {
         let mut inner = self.inner.to_owned();
         if inner.len() != 0 && inner.last() != Some(&b'/') {
@@ -119,6 +123,21 @@
         inner.extend(other.as_ref().bytes());
         HgPathBuf::from_bytes(&inner)
     }
+    /// Given a base directory, returns the slice of `self` relative to the
+    /// base directory. Returns `None` if `base` is not a directory (i.e. does
+    /// not end with a `b'/'`) or if `self` does not start with `base`; an
+    /// empty `base` returns `self` unchanged.
+    pub fn relative_to(&self, base: impl AsRef<HgPath>) -> Option<&HgPath> {
+        let base = base.as_ref();
+        if base.is_empty() {
+            return Some(self);
+        }
+        let is_dir = base.as_bytes().ends_with(b"/");
+        if is_dir && self.starts_with(base) {
+            Some(HgPath::new(&self.inner[base.len()..]))
+        } else {
+            None
+        }
+    }
     /// Checks for errors in the path, short-circuiting at the first one.
     /// This generates fine-grained errors useful for debugging.
     /// To simply check if the path is valid during tests, use `is_valid`.
@@ -162,7 +181,19 @@
     }
 }
 
-#[derive(Eq, Ord, Clone, PartialEq, PartialOrd, Debug, Hash)]
+impl fmt::Debug for HgPath {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "HgPath({:?})", String::from_utf8_lossy(&self.inner))
+    }
+}
+
+impl fmt::Display for HgPath {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", String::from_utf8_lossy(&self.inner))
+    }
+}
+
+#[derive(Eq, Ord, Clone, PartialEq, PartialOrd, Hash)]
 pub struct HgPathBuf {
     inner: Vec<u8>,
 }
@@ -185,6 +216,18 @@
     }
 }
 
+impl fmt::Debug for HgPathBuf {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "HgPathBuf({:?})", String::from_utf8_lossy(&self.inner))
+    }
+}
+
+impl fmt::Display for HgPathBuf {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", String::from_utf8_lossy(&self.inner))
+    }
+}
+
 impl Deref for HgPathBuf {
     type Target = HgPath;
 
@@ -399,4 +442,35 @@
         let path = HgPathBuf::from_bytes(b"a").join(HgPath::new(b"/b"));
         assert_eq!(b"a//b", path.as_bytes());
     }
+
+    #[test]
+    fn test_relative_to() {
+        let path = HgPath::new(b"");
+        let base = HgPath::new(b"");
+        assert_eq!(Some(path), path.relative_to(base));
+
+        let path = HgPath::new(b"path");
+        let base = HgPath::new(b"");
+        assert_eq!(Some(path), path.relative_to(base));
+
+        let path = HgPath::new(b"a");
+        let base = HgPath::new(b"b");
+        assert_eq!(None, path.relative_to(base));
+
+        let path = HgPath::new(b"a/b");
+        let base = HgPath::new(b"a");
+        assert_eq!(None, path.relative_to(base));
+
+        let path = HgPath::new(b"a/b");
+        let base = HgPath::new(b"a/");
+        assert_eq!(Some(HgPath::new(b"b")), path.relative_to(base));
+
+        let path = HgPath::new(b"nested/path/to/b");
+        let base = HgPath::new(b"nested/path/");
+        assert_eq!(Some(HgPath::new(b"to/b")), path.relative_to(base));
+
+        let path = HgPath::new(b"ends/with/dir/");
+        let base = HgPath::new(b"ends/");
+        assert_eq!(Some(HgPath::new(b"with/dir/")), path.relative_to(base));
+    }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/utils/path.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * This software may be used and distributed according to the terms of the
+ * GNU General Public License version 2.
+ */
+
+//! Path-related utilities.
+
+use std::env;
+#[cfg(not(unix))]
+use std::fs::rename;
+use std::fs::{self, remove_file as fs_remove_file};
+use std::io::{self, ErrorKind};
+use std::path::{Component, Path, PathBuf};
+
+use anyhow::Result;
+#[cfg(not(unix))]
+use tempfile::Builder;
+
+/// Normalize a canonicalized Path for display.
+///
+/// This removes the UNC prefix `\\?\` on Windows.
+pub fn normalize_for_display(path: &str) -> &str {
+    if cfg!(windows) && path.starts_with(r"\\?\") {
+        &path[4..]
+    } else {
+        path
+    }
+}
+
+/// Similar to [`normalize_for_display`], but works on bytes.
+pub fn normalize_for_display_bytes(path: &[u8]) -> &[u8] {
+    if cfg!(windows) && path.starts_with(br"\\?\") {
+        &path[4..]
+    } else {
+        path
+    }
+}
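A hedged test sketch for the two helpers (Windows-only, since on other platforms they are identity functions):

```
#[cfg(windows)]
#[test]
fn strips_unc_prefix() {
    assert_eq!(normalize_for_display(r"\\?\C:\repo"), r"C:\repo");
    assert_eq!(normalize_for_display_bytes(br"\\?\C:\repo"), br"C:\repo");
}
```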
+
+/// Return the absolute and normalized path without accessing the filesystem.
+///
+/// Unlike [`fs::canonicalize`], do not follow symlinks.
+///
+/// This function does not access the filesystem. Therefore it can behave
+/// differently from the kernel or other library functions in corner cases.
+/// For example:
+///
+/// - On some systems with symlink support, `foo/bar/..` and `foo` can be
+///   different as seen by the kernel, if `foo/bar` is a symlink. This function
+///   always returns `foo` in this case.
+/// - On Windows, the official normalization rules are much more complicated.
+///   See https://github.com/rust-lang/rust/pull/47363#issuecomment-357069527.
+///   For example, this function cannot translate a "drive relative" path
+///   "X:foo" to an absolute path.
+///
+/// Return an error if `std::env::current_dir()` fails or if this function
+/// fails to produce an absolute path.
+pub fn absolute(path: impl AsRef<Path>) -> io::Result<PathBuf> {
+    let path = path.as_ref();
+    let path = if path.is_absolute() {
+        path.to_path_buf()
+    } else {
+        std::env::current_dir()?.join(path)
+    };
+
+    if !path.is_absolute() {
+        return Err(io::Error::new(
+            io::ErrorKind::Other,
+            format!("cannot get absoltue path from {:?}", path),
+        ));
+    }
+
+    let mut result = PathBuf::new();
+    for component in path.components() {
+        match component {
+            Component::Normal(_)
+            | Component::RootDir
+            | Component::Prefix(_) => {
+                result.push(component);
+            }
+            Component::ParentDir => {
+                result.pop();
+            }
+            Component::CurDir => (),
+        }
+    }
+    Ok(result)
+}
+
+/// Remove the file pointed to by `path`.
+#[cfg(unix)]
+pub fn remove_file<P: AsRef<Path>>(path: P) -> Result<()> {
+    fs_remove_file(path)?;
+    Ok(())
+}
+
+/// Remove the file pointed to by `path`.
+///
+/// On Windows, removing a file can fail for various reasons, including if the
+/// file is memory mapped. This can happen when the repository is accessed
+/// concurrently while a background task is trying to remove a packfile. To
+/// solve this, we can rename the file before trying to remove it.
+/// If the remove operation fails, a future repack will clean it up.
+#[cfg(not(unix))]
+pub fn remove_file<P: AsRef<Path>>(path: P) -> Result<()> {
+    let path = path.as_ref();
+    let extension = path
+        .extension()
+        .and_then(|ext| ext.to_str())
+        .map_or(".to-delete".to_owned(), |ext| ".".to_owned() + ext + "-tmp");
+
+    let dest_path = Builder::new()
+        .prefix("")
+        .suffix(&extension)
+        .rand_bytes(8)
+        .tempfile_in(path.parent().unwrap())?
+        .into_temp_path();
+
+    rename(path, &dest_path)?;
+
+    // Ignore errors when removing the file, it will be cleaned up at a later
+    // time.
+    let _ = fs_remove_file(dest_path);
+    Ok(())
+}
+
+/// Create the directory and ignore failures when a directory of the same name
+/// already exists.
+pub fn create_dir(path: impl AsRef<Path>) -> io::Result<()> {
+    match fs::create_dir(path.as_ref()) {
+        Ok(()) => Ok(()),
+        Err(e) => {
+            if e.kind() == ErrorKind::AlreadyExists && path.as_ref().is_dir() {
+                Ok(())
+            } else {
+                Err(e)
+            }
+        }
+    }
+}
+
+/// Expand the user's home directory and any environment variable references
+/// in the given path.
+///
+/// This function is designed to emulate the behavior of Mercurial's
+/// `util.expandpath` function, which in turn uses Python's
+/// `os.path.expand{user,vars}` functions. This results in behavior that is
+/// notably different from the default expansion behavior of the `shellexpand`
+/// crate. In particular:
+///
+/// - If a reference to an environment variable is missing or invalid, the
+///   reference is left unchanged in the resulting path rather than emitting an
+///   error.
+///
+/// - Home directory expansion explicitly happens after environment variable
+///   expansion, meaning that if an environment variable is expanded into a
+///   string starting with a tilde (`~`), the tilde will be expanded into the
+///   user's home directory.
+pub fn expand_path(path: impl AsRef<str>) -> PathBuf {
+    expand_path_impl(path.as_ref(), |k| env::var(k).ok(), dirs::home_dir)
+}
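A usage sketch of the semantics described above; `HG_NO_SUCH_VAR` is assumed to be unset, and the snippet is unix-only so the `/` separators survive the Windows preprocessing:

```
#[cfg(unix)]
fn demo() {
    // Tilde expansion runs after variable expansion.
    if let Some(home) = dirs::home_dir() {
        assert_eq!(expand_path("~/.hgrc"), home.join(".hgrc"));
    }
    // A missing environment variable is left intact instead of erroring.
    assert_eq!(
        expand_path("/etc/$HG_NO_SUCH_VAR/hgrc"),
        std::path::PathBuf::from("/etc/$HG_NO_SUCH_VAR/hgrc")
    );
}
```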
+
+/// Same as `expand_path` but explicitly takes closures for environment
+/// variable and home directory lookup for the sake of testability.
+fn expand_path_impl<E, H>(path: &str, getenv: E, homedir: H) -> PathBuf
+where
+    E: FnMut(&str) -> Option<String>,
+    H: FnOnce() -> Option<PathBuf>,
+{
+    // The shellexpand crate does not expand Windows environment variables
+    // like `%PROGRAMDATA%`. We'd like to expand them too. So let's do some
+    // pre-processing.
+    //
+    // XXX: Doing this preprocessing has the unfortunate side-effect that
+    // if an environment variable fails to expand on Windows, the resulting
+    // string will contain a UNIX-style environment variable reference.
+    //
+    // e.g., "/foo/%MISSING%/bar" will expand to "/foo/${MISSING}/bar"
+    //
+    // The current approach is good enough for now, but likely needs to
+    // be improved later for correctness.
+    let path = {
+        let mut new_path = String::new();
+        let mut is_starting = true;
+        for ch in path.chars() {
+            if ch == '%' {
+                if is_starting {
+                    new_path.push_str("${");
+                } else {
+                    new_path.push('}');
+                }
+                is_starting = !is_starting;
+            } else if cfg!(windows) && ch == '/' {
+                // Only on Windows, change "/" to "\" automatically.
+                // This makes sure "%include /foo" works as expected.
+                new_path.push('\\')
+            } else {
+                new_path.push(ch);
+            }
+        }
+        new_path
+    };
+
+    let path = shellexpand::env_with_context_no_errors(&path, getenv);
+    shellexpand::tilde_with_context(&path, homedir)
+        .as_ref()
+        .into()
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use std::fs::File;
+
+    use tempfile::TempDir;
+
+    #[cfg(windows)]
+    mod windows {
+        use super::*;
+
+        #[test]
+        fn test_absolute_fullpath() {
+            assert_eq!(absolute("C:/foo").unwrap(), Path::new("C:\\foo"));
+            assert_eq!(
+                absolute("x:\\a/b\\./.\\c").unwrap(),
+                Path::new("x:\\a\\b\\c")
+            );
+            assert_eq!(
+                absolute("y:/a/b\\../..\\c\\../d\\./.").unwrap(),
+                Path::new("y:\\d")
+            );
+            assert_eq!(
+                absolute("z:/a/b\\../..\\../..\\..").unwrap(),
+                Path::new("z:\\")
+            );
+        }
+    }
+
+    #[cfg(unix)]
+    mod unix {
+        use super::*;
+
+        #[test]
+        fn test_absolute_fullpath() {
+            assert_eq!(
+                absolute("/a/./b\\c/../d/.").unwrap(),
+                Path::new("/a/d")
+            );
+            assert_eq!(absolute("/a/../../../../b").unwrap(), Path::new("/b"));
+            assert_eq!(absolute("/../../..").unwrap(), Path::new("/"));
+            assert_eq!(absolute("/../../../").unwrap(), Path::new("/"));
+            assert_eq!(
+                absolute("//foo///bar//baz").unwrap(),
+                Path::new("/foo/bar/baz")
+            );
+            assert_eq!(absolute("//").unwrap(), Path::new("/"));
+        }
+    }
+
+    #[test]
+    fn test_create_dir_non_exist() -> Result<()> {
+        let tempdir = TempDir::new()?;
+        let mut path = tempdir.path().to_path_buf();
+        path.push("dir");
+        create_dir(&path)?;
+        assert!(path.is_dir());
+        Ok(())
+    }
+
+    #[test]
+    fn test_create_dir_exist() -> Result<()> {
+        let tempdir = TempDir::new()?;
+        let mut path = tempdir.path().to_path_buf();
+        path.push("dir");
+        create_dir(&path)?;
+        assert!(&path.is_dir());
+        create_dir(&path)?;
+        assert!(&path.is_dir());
+        Ok(())
+    }
+
+    #[test]
+    fn test_create_dir_file_exist() -> Result<()> {
+        let tempdir = TempDir::new()?;
+        let mut path = tempdir.path().to_path_buf();
+        path.push("dir");
+        File::create(&path)?;
+        let err = create_dir(&path).unwrap_err();
+        assert_eq!(err.kind(), ErrorKind::AlreadyExists);
+        Ok(())
+    }
+
+    #[test]
+    fn test_path_expansion() {
+        fn getenv(key: &str) -> Option<String> {
+            match key {
+                "foo" => Some("~/a".into()),
+                "bar" => Some("b".into()),
+                _ => None,
+            }
+        }
+
+        fn homedir() -> Option<PathBuf> {
+            Some(PathBuf::from("/home/user"))
+        }
+
+        let path = "$foo/${bar}/$baz";
+        let expected = PathBuf::from("/home/user/a/b/$baz");
+
+        assert_eq!(expand_path_impl(&path, getenv, homedir), expected);
+    }
+}
--- a/rust/hg-cpython/src/ancestors.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/ancestors.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -34,6 +34,7 @@
 //! [`LazyAncestors`]: struct.LazyAncestors.html
 //! [`MissingAncestors`]: struct.MissingAncestors.html
 //! [`AncestorsIterator`]: struct.AncestorsIterator.html
+use crate::revlog::pyindex_to_graph;
 use crate::{
     cindex::Index, conversion::rev_pyiter_collect, exceptions::GraphError,
 };
@@ -73,7 +74,7 @@
                 inclusive: bool) -> PyResult<AncestorsIterator> {
         let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?;
         let ait = CoreIterator::new(
-            Index::new(py, index)?,
+            pyindex_to_graph(py, index)?,
             initvec,
             stoprev,
             inclusive,
@@ -113,7 +114,8 @@
         let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?;
 
         let lazy =
-            CoreLazy::new(Index::new(py, index)?, initvec, stoprev, inclusive)
+            CoreLazy::new(pyindex_to_graph(py, index)?,
+                          initvec, stoprev, inclusive)
                 .map_err(|e| GraphError::pynew(py, e))?;
 
         Self::create_instance(py, RefCell::new(Box::new(lazy)))
@@ -126,7 +128,7 @@
 
     def __new__(_cls, index: PyObject, bases: PyObject) -> PyResult<MissingAncestors> {
         let bases_vec: Vec<Revision> = rev_pyiter_collect(py, &bases)?;
-        let inner = CoreMissing::new(Index::new(py, index)?, bases_vec);
+        let inner = CoreMissing::new(pyindex_to_graph(py, index)?, bases_vec);
         MissingAncestors::create_instance(py, RefCell::new(Box::new(inner)))
     }
 
--- a/rust/hg-cpython/src/cindex.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/cindex.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -10,19 +10,25 @@
 //! Ideally, we should use an Index entirely implemented in Rust,
 //! but this will take some time to get there.
 
-use cpython::{PyClone, PyObject, PyResult, Python};
+use cpython::{exc::ImportError, PyClone, PyErr, PyObject, PyResult, Python};
 use hg::{Graph, GraphError, Revision, WORKING_DIRECTORY_REVISION};
 use libc::c_int;
 
-py_capsule_fn!(
-    from mercurial.cext.parsers import index_get_parents_CAPI
-        as get_parents_capi
-        signature (
-            index: *mut RawPyObject,
-            rev: c_int,
-            ps: *mut [c_int; 2],
-        ) -> c_int
-);
+const REVLOG_CABI_VERSION: c_int = 1;
+
+#[repr(C)]
+pub struct Revlog_CAPI {
+    abi_version: c_int,
+    index_parents: unsafe extern "C" fn(
+        index: *mut revlog_capi::RawPyObject,
+        rev: c_int,
+        ps: *mut [c_int; 2],
+    ) -> c_int,
+}
+
+py_capsule!(
+    from mercurial.cext.parsers import revlog_CAPI
+        as revlog_capi for Revlog_CAPI);
 
 /// A `Graph` backed by objects and functions from revlog.c
 ///
@@ -58,16 +64,32 @@
 /// mechanisms in other contexts.
 pub struct Index {
     index: PyObject,
-    parents: get_parents_capi::CapsuleFn,
+    capi: &'static Revlog_CAPI,
 }
 
 impl Index {
     pub fn new(py: Python, index: PyObject) -> PyResult<Self> {
+        let capi = unsafe { revlog_capi::retrieve(py)? };
+        if capi.abi_version != REVLOG_CABI_VERSION {
+            return Err(PyErr::new::<ImportError, _>(
+                py,
+                format!(
+                    "ABI version mismatch: the C ABI revlog version {} \
+                     does not match the {} expected by Rust hg-cpython",
+                    capi.abi_version, REVLOG_CABI_VERSION
+                ),
+            ));
+        }
         Ok(Index {
             index: index,
-            parents: get_parents_capi::retrieve(py)?,
+            capi: capi,
         })
     }
+
+    /// Return a reference to the CPython Index object in this struct
+    pub fn inner(&self) -> &PyObject {
+        &self.index
+    }
 }
 
 impl Clone for Index {
@@ -75,7 +97,16 @@
         let guard = Python::acquire_gil();
         Index {
             index: self.index.clone_ref(guard.python()),
-            parents: self.parents.clone(),
+            capi: self.capi,
+        }
+    }
+}
+
+impl PyClone for Index {
+    fn clone_ref(&self, py: Python) -> Self {
+        Index {
+            index: self.index.clone_ref(py),
+            capi: self.capi,
         }
     }
 }
@@ -88,7 +119,7 @@
         }
         let mut res: [c_int; 2] = [0; 2];
         let code = unsafe {
-            (self.parents)(
+            (self.capi.index_parents)(
                 self.index.as_ptr(),
                 rev as c_int,
                 &mut res as *mut [c_int; 2],
--- a/rust/hg-cpython/src/dagops.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/dagops.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -9,14 +9,14 @@
 //! `hg-core` package.
 //!
 //! From Python, this will be seen as `mercurial.rustext.dagop`
-use crate::{
-    cindex::Index, conversion::rev_pyiter_collect, exceptions::GraphError,
-};
+use crate::{conversion::rev_pyiter_collect, exceptions::GraphError};
 use cpython::{PyDict, PyModule, PyObject, PyResult, Python};
 use hg::dagops;
 use hg::Revision;
 use std::collections::HashSet;
 
+use crate::revlog::pyindex_to_graph;
+
 /// Using the `index`, return heads out of any Python iterable of Revisions
 ///
 /// This is the Rust counterpart for `mercurial.dagop.headrevs`
@@ -26,7 +26,7 @@
     revs: PyObject,
 ) -> PyResult<HashSet<Revision>> {
     let mut as_set: HashSet<Revision> = rev_pyiter_collect(py, &revs)?;
-    dagops::retain_heads(&Index::new(py, index)?, &mut as_set)
+    dagops::retain_heads(&pyindex_to_graph(py, index)?, &mut as_set)
         .map_err(|e| GraphError::pynew(py, e))?;
     Ok(as_set)
 }
--- a/rust/hg-cpython/src/dirstate.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/dirstate.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -17,8 +17,8 @@
     dirs_multiset::Dirs, dirstate_map::DirstateMap, status::status_wrapper,
 };
 use cpython::{
-    exc, PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult,
-    PySequence, Python,
+    exc, PyBytes, PyDict, PyErr, PyModule, PyObject, PyResult, PySequence,
+    Python,
 };
 use hg::{
     utils::hg_path::HgPathBuf, DirstateEntry, DirstateParseError, EntryState,
@@ -116,7 +116,7 @@
             status_wrapper(
                 dmap: DirstateMap,
                 root_dir: PyObject,
-                files: PyList,
+                matcher: PyObject,
                 list_clean: bool,
                 last_normal_time: i64,
                 check_exec: bool
--- a/rust/hg-cpython/src/dirstate/copymap.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/dirstate/copymap.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -12,7 +12,7 @@
 use std::cell::RefCell;
 
 use crate::dirstate::dirstate_map::DirstateMap;
-use crate::ref_sharing::PyLeakedRef;
+use crate::ref_sharing::PyLeaked;
 use hg::{utils::hg_path::HgPathBuf, CopyMapIter};
 
 py_class!(pub class CopyMap |py| {
@@ -104,14 +104,14 @@
 
 py_shared_iterator!(
     CopyMapKeysIterator,
-    PyLeakedRef<CopyMapIter<'static>>,
+    PyLeaked<CopyMapIter<'static>>,
     CopyMap::translate_key,
     Option<PyBytes>
 );
 
 py_shared_iterator!(
     CopyMapItemsIterator,
-    PyLeakedRef<CopyMapIter<'static>>,
+    PyLeaked<CopyMapIter<'static>>,
     CopyMap::translate_key_value,
     Option<(PyBytes, PyBytes)>
 );
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -17,7 +17,7 @@
 };
 
 use crate::dirstate::extract_dirstate;
-use crate::ref_sharing::{PyLeakedRef, PySharedRefCell};
+use crate::ref_sharing::{PyLeaked, PySharedRefCell};
 use hg::{
     utils::hg_path::{HgPath, HgPathBuf},
     DirsMultiset, DirsMultisetIter, DirstateMapError, DirstateParseError,
@@ -47,6 +47,9 @@
         let inner = if let Ok(map) = map.cast_as::<PyDict>(py) {
             let dirstate = extract_dirstate(py, &map)?;
             DirsMultiset::from_dirstate(&dirstate, skip_state)
+                .map_err(|e| {
+                    PyErr::new::<exc::ValueError, _>(py, e.to_string())
+                })?
         } else {
             let map: Result<Vec<HgPathBuf>, PyErr> = map
                 .iter(py)?
@@ -57,6 +60,9 @@
                 })
                 .collect();
             DirsMultiset::from_manifest(&map?)
+                .map_err(|e| {
+                    PyErr::new::<exc::ValueError, _>(py, e.to_string())
+                })?
         };
 
         Self::create_instance(
@@ -68,8 +74,19 @@
     def addpath(&self, path: PyObject) -> PyResult<PyObject> {
         self.inner_shared(py).borrow_mut()?.add_path(
             HgPath::new(path.extract::<PyBytes>(py)?.data(py)),
-        );
-        Ok(py.None())
+        ).and(Ok(py.None())).or_else(|e| {
+            match e {
+                DirstateMapError::EmptyPath => {
+                    Ok(py.None())
+                },
+                e => {
+                    Err(PyErr::new::<exc::ValueError, _>(
+                        py,
+                        e.to_string(),
+                    ))
+                }
+            }
+        })
     }
 
     def delpath(&self, path: PyObject) -> PyResult<PyObject> {
@@ -79,20 +96,20 @@
             .and(Ok(py.None()))
             .or_else(|e| {
                 match e {
-                    DirstateMapError::PathNotFound(_p) => {
+                    DirstateMapError::EmptyPath => {
+                        Ok(py.None())
+                    },
+                    e => {
                         Err(PyErr::new::<exc::ValueError, _>(
                             py,
-                            "expected a value, found none".to_string(),
+                            e.to_string(),
                         ))
                     }
-                    DirstateMapError::EmptyPath => {
-                        Ok(py.None())
-                    }
                 }
             })
     }
     def __iter__(&self) -> PyResult<DirsMultisetKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        let leaked_ref = self.inner_shared(py).leak_immutable();
         DirsMultisetKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -123,7 +140,7 @@
 
 py_shared_iterator!(
     DirsMultisetKeysIterator,
-    PyLeakedRef<DirsMultisetIter<'static>>,
+    PyLeaked<DirsMultisetIter<'static>>,
     Dirs::translate_key,
     Option<PyBytes>
 );
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -20,13 +20,13 @@
 use crate::{
     dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
     dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
-    ref_sharing::{PyLeakedRef, PySharedRefCell},
+    ref_sharing::{PyLeaked, PySharedRefCell},
 };
 use hg::{
     utils::hg_path::{HgPath, HgPathBuf},
     DirsMultiset, DirstateEntry, DirstateMap as RustDirstateMap,
-    DirstateParents, DirstateParseError, EntryState, StateMapIter,
-    PARENT_SIZE,
+    DirstateMapError, DirstateParents, DirstateParseError, EntryState,
+    StateMapIter, PARENT_SIZE,
 };
 
 // TODO
@@ -97,8 +97,9 @@
                 size: size.extract(py)?,
                 mtime: mtime.extract(py)?,
             },
-        );
-        Ok(py.None())
+        ).and(Ok(py.None())).or_else(|e: DirstateMapError| {
+            Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
+        })
     }
 
     def removefile(
@@ -199,6 +200,9 @@
         let d = d.extract::<PyBytes>(py)?;
         Ok(self.inner_shared(py).borrow_mut()?
             .has_tracked_dir(HgPath::new(d.data(py)))
+            .map_err(|e| {
+                PyErr::new::<exc::ValueError, _>(py, e.to_string())
+            })?
             .to_py_object(py))
     }
 
@@ -206,6 +210,9 @@
         let d = d.extract::<PyBytes>(py)?;
         Ok(self.inner_shared(py).borrow_mut()?
             .has_dir(HgPath::new(d.data(py)))
+            .map_err(|e| {
+                PyErr::new::<exc::ValueError, _>(py, e.to_string())
+            })?
             .to_py_object(py))
     }
 
@@ -304,7 +311,7 @@
     }
 
     def keys(&self) -> PyResult<DirstateMapKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        let leaked_ref = self.inner_shared(py).leak_immutable();
         DirstateMapKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -312,7 +319,7 @@
     }
 
     def items(&self) -> PyResult<DirstateMapItemsIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        let leaked_ref = self.inner_shared(py).leak_immutable();
         DirstateMapItemsIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -320,7 +327,7 @@
     }
 
     def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        let leaked_ref = self.inner_shared(py).leak_immutable();
         DirstateMapKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -329,24 +336,35 @@
 
     def getdirs(&self) -> PyResult<Dirs> {
         // TODO don't copy, share the reference
-        self.inner_shared(py).borrow_mut()?.set_dirs();
+        self.inner_shared(py).borrow_mut()?.set_dirs()
+            .map_err(|e| {
+                PyErr::new::<exc::ValueError, _>(py, e.to_string())
+            })?;
         Dirs::from_inner(
             py,
             DirsMultiset::from_dirstate(
                 &self.inner_shared(py).borrow(),
                 Some(EntryState::Removed),
-            ),
+            )
+            .map_err(|e| {
+                PyErr::new::<exc::ValueError, _>(py, e.to_string())
+            })?,
         )
     }
     def getalldirs(&self) -> PyResult<Dirs> {
         // TODO don't copy, share the reference
-        self.inner_shared(py).borrow_mut()?.set_all_dirs();
+        self.inner_shared(py).borrow_mut()?.set_all_dirs()
+            .map_err(|e| {
+                PyErr::new::<exc::ValueError, _>(py, e.to_string())
+            })?;
         Dirs::from_inner(
             py,
             DirsMultiset::from_dirstate(
                 &self.inner_shared(py).borrow(),
                 None,
-            ),
+            ).map_err(|e| {
+                PyErr::new::<exc::ValueError, _>(py, e.to_string())
+            })?,
         )
     }
 
@@ -437,7 +455,7 @@
     }
 
     def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        let leaked_ref = self.inner_shared(py).leak_immutable();
         CopyMapKeysIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
@@ -445,7 +463,7 @@
     }
 
     def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
-        let leaked_ref = self.inner_shared(py).leak_immutable()?;
+        let leaked_ref = self.inner_shared(py).leak_immutable();
         CopyMapItemsIterator::from_inner(
             py,
             unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
@@ -483,14 +501,14 @@
 
 py_shared_iterator!(
     DirstateMapKeysIterator,
-    PyLeakedRef<StateMapIter<'static>>,
+    PyLeaked<StateMapIter<'static>>,
     DirstateMap::translate_key,
     Option<PyBytes>
 );
 
 py_shared_iterator!(
     DirstateMapItemsIterator,
-    PyLeakedRef<StateMapIter<'static>>,
+    PyLeaked<StateMapIter<'static>>,
     DirstateMap::translate_key_value,
     Option<(PyBytes, PyObject)>
 );
--- a/rust/hg-cpython/src/dirstate/status.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/dirstate/status.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -6,19 +6,23 @@
 // GNU General Public License version 2 or any later version.
 
 //! Bindings for the `hg::status` module provided by the
-//! `hg-core` crate. From Python, this will be seen as `rustext.dirstate.status`.
-//!
+//! `hg-core` crate. From Python, this will be seen as
+//! `rustext.dirstate.status`.
 
 use crate::dirstate::DirstateMap;
 use cpython::exc::ValueError;
 use cpython::{
-    PyBytes, PyErr, PyList, PyObject, PyResult, Python, PythonObject,
-    ToPyObject,
+    ObjectProtocol, PyBytes, PyErr, PyList, PyObject, PyResult, PyTuple,
+    Python, PythonObject, ToPyObject,
 };
-use hg::utils::files::get_path_from_bytes;
-
-use hg::utils::hg_path::HgPath;
-use hg::{status, utils::hg_path::HgPathBuf};
+use hg::utils::hg_path::HgPathBuf;
+use hg::{
+    matchers::{AlwaysMatcher, FileMatcher},
+    status,
+    utils::{files::get_path_from_bytes, hg_path::HgPath},
+    StatusResult,
+};
+use std::borrow::Borrow;
 
 /// This will be useless once trait impls for collections are added to `PyBytes`
 /// upstream.
@@ -42,8 +46,8 @@
 pub fn status_wrapper(
     py: Python,
     dmap: DirstateMap,
+    matcher: PyObject,
     root_dir: PyObject,
-    files: PyList,
     list_clean: bool,
     last_normal_time: i64,
     check_exec: bool,
@@ -54,22 +58,65 @@
     let dmap: DirstateMap = dmap.to_py_object(py);
     let dmap = dmap.get_inner(py);
 
-    let files: PyResult<Vec<HgPathBuf>> = files
-        .iter(py)
-        .map(|f| Ok(HgPathBuf::from_bytes(f.extract::<PyBytes>(py)?.data(py))))
-        .collect();
-    let files = files?;
+    match matcher.get_type(py).name(py).borrow() {
+        "alwaysmatcher" => {
+            let matcher = AlwaysMatcher;
+            let (lookup, status_res) = status(
+                &dmap,
+                &matcher,
+                &root_dir,
+                list_clean,
+                last_normal_time,
+                check_exec,
+            )
+            .map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
+            build_response(lookup, status_res, py)
+        }
+        "exactmatcher" => {
+            let files = matcher.call_method(
+                py,
+                "files",
+                PyTuple::new(py, &[]),
+                None,
+            )?;
+            let files: PyList = files.cast_into(py)?;
+            let files: PyResult<Vec<HgPathBuf>> = files
+                .iter(py)
+                .map(|f| {
+                    Ok(HgPathBuf::from_bytes(
+                        f.extract::<PyBytes>(py)?.data(py),
+                    ))
+                })
+                .collect();
 
-    let (lookup, status_res) = status(
-        &dmap,
-        &root_dir,
-        &files,
-        list_clean,
-        last_normal_time,
-        check_exec,
-    )
-    .map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
+            let files = files?;
+            let matcher = FileMatcher::new(&files)
+                .map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
+            let (lookup, status_res) = status(
+                &dmap,
+                &matcher,
+                &root_dir,
+                list_clean,
+                last_normal_time,
+                check_exec,
+            )
+            .map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
+            build_response(lookup, status_res, py)
+        }
+        e => {
+            return Err(PyErr::new::<ValueError, _>(
+                py,
+                format!("Unsupported matcher {}", e),
+            ));
+        }
+    }
+}
 
+fn build_response(
+    lookup: Vec<&HgPath>,
+    status_res: StatusResult,
+    py: Python,
+) -> PyResult<(PyList, PyList, PyList, PyList, PyList, PyList, PyList)> {
     let modified = collect_pybytes_list(py, status_res.modified.as_ref());
     let added = collect_pybytes_list(py, status_res.added.as_ref());
     let removed = collect_pybytes_list(py, status_res.removed.as_ref());
--- a/rust/hg-cpython/src/discovery.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/discovery.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -25,6 +25,8 @@
 
 use std::cell::RefCell;
 
+use crate::revlog::pyindex_to_graph;
+
 py_class!(pub class PartialDiscovery |py| {
     data inner: RefCell<Box<CorePartialDiscovery<Index>>>;
 
@@ -42,7 +44,7 @@
         Self::create_instance(
             py,
             RefCell::new(Box::new(CorePartialDiscovery::new(
-                Index::new(py, index)?,
+                pyindex_to_graph(py, index)?,
                 rev_pyiter_collect(py, &targetheads)?,
                 respectsize,
                 randomize,
--- a/rust/hg-cpython/src/filepatterns.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/filepatterns.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -10,7 +10,6 @@
 //! `hg-core` crate. From Python, this will be seen as `rustext.filepatterns`
 //! and can be used as a replacement for the pure `filepatterns` Python
 //! module.
-//!
 use crate::exceptions::{PatternError, PatternFileError};
 use cpython::{
     PyBytes, PyDict, PyModule, PyObject, PyResult, PyTuple, Python, ToPyObject,
--- a/rust/hg-cpython/src/lib.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/lib.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -35,6 +35,7 @@
 pub mod exceptions;
 pub mod filepatterns;
 pub mod parsers;
+pub mod revlog;
 pub mod utils;
 
 py_module_initializer!(rustext, initrustext, PyInit_rustext, |py, m| {
@@ -49,6 +50,7 @@
     m.add(py, "dagop", dagops::init_module(py, &dotted_name)?)?;
     m.add(py, "discovery", discovery::init_module(py, &dotted_name)?)?;
     m.add(py, "dirstate", dirstate::init_module(py, &dotted_name)?)?;
+    m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?;
     m.add(
         py,
         "filepatterns",
--- a/rust/hg-cpython/src/parsers.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/parsers.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -9,16 +9,15 @@
 //! `hg-core` package.
 //!
 //! From Python, this will be seen as `mercurial.rustext.parsers`
-//!
 use cpython::{
     exc, PyBytes, PyDict, PyErr, PyInt, PyModule, PyResult, PyTuple, Python,
     PythonObject, ToPyObject,
 };
 use hg::{
     pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf,
-    DirstatePackError, DirstateParents, DirstateParseError, PARENT_SIZE,
+    DirstatePackError, DirstateParents, DirstateParseError, FastHashMap,
+    PARENT_SIZE,
 };
-use std::collections::HashMap;
 use std::convert::TryInto;
 
 use crate::dirstate::{extract_dirstate, make_dirstate_tuple};
@@ -30,8 +29,8 @@
     copymap: PyDict,
     st: PyBytes,
 ) -> PyResult<PyTuple> {
-    let mut dirstate_map = HashMap::new();
-    let mut copies = HashMap::new();
+    let mut dirstate_map = FastHashMap::default();
+    let mut copies = FastHashMap::default();
 
     match parse_dirstate(&mut dirstate_map, &mut copies, st.data(py)) {
         Ok(parents) => {
@@ -86,7 +85,7 @@
 
     let mut dirstate_map = extract_dirstate(py, &dmap)?;
 
-    let copies: Result<HashMap<HgPathBuf, HgPathBuf>, PyErr> = copymap
+    let copies: Result<FastHashMap<HgPathBuf, HgPathBuf>, PyErr> = copymap
         .items(py)
         .iter()
         .map(|(key, value)| {
--- a/rust/hg-cpython/src/ref_sharing.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hg-cpython/src/ref_sharing.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -23,53 +23,56 @@
 //! Macros for use in the `hg-cpython` bridge library.
 
 use crate::exceptions::AlreadyBorrowed;
-use cpython::{PyClone, PyObject, PyResult, Python};
-use std::cell::{Cell, Ref, RefCell, RefMut};
+use cpython::{exc, PyClone, PyErr, PyObject, PyResult, Python};
+use std::cell::{Ref, RefCell, RefMut};
+use std::ops::{Deref, DerefMut};
+use std::sync::atomic::{AtomicUsize, Ordering};
 
 /// Manages the shared state between Python and Rust
+///
+/// `PySharedState` is owned by `PySharedRefCell`, and is shared across its
+/// derived references. The consistency of these references are guaranteed
+/// as follows:
+///
+/// - The immutability of `py_class!` object fields. Any mutation of
+///   `PySharedRefCell` is allowed only through its `borrow_mut()`.
+/// - The `py: Python<'_>` token, which makes sure that any data access is
+///   synchronized by the GIL.
+/// - The underlying `RefCell`, which prevents `PySharedRefCell` data from
+///   being directly borrowed or leaked while it is mutably borrowed.
+/// - The `borrow_count`, which is the number of references borrowed from
+///   `PyLeaked`. Just like `RefCell`, mutation is prohibited while `PyLeaked`
+///   is borrowed.
+/// - The `generation` counter, which increments on `borrow_mut()`. `PyLeaked`
+///   reference is valid only if the `current_generation()` equals to the
+///   `generation` at the time of `leak_immutable()`.
 #[derive(Debug, Default)]
 struct PySharedState {
-    leak_count: Cell<usize>,
-    mutably_borrowed: Cell<bool>,
+    // The counter variable could be Cell<usize> since any operation on
+    // PySharedState is synchronized by the GIL, but being "atomic" makes
+    // PySharedState inherently Sync. The ordering requirement doesn't
+    // matter thanks to the GIL.
+    borrow_count: AtomicUsize,
+    generation: AtomicUsize,
 }
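The generation scheme can be modeled without any Python machinery. This standalone sketch (all names illustrative) shows why a leaked reference goes stale after a mutation:

```
use std::sync::atomic::{AtomicUsize, Ordering};

#[derive(Default)]
struct State {
    generation: AtomicUsize,
}

struct LeakedHandle<'a> {
    state: &'a State,
    generation: usize, // generation observed at leak time
}

impl State {
    fn leak(&self) -> LeakedHandle<'_> {
        LeakedHandle {
            state: self,
            generation: self.generation.load(Ordering::Relaxed),
        }
    }
    // Stands in for borrow_mut(): any mutation bumps the counter.
    fn mutate(&self) {
        self.generation.fetch_add(1, Ordering::Relaxed);
    }
}

impl LeakedHandle<'_> {
    fn is_valid(&self) -> bool {
        self.state.generation.load(Ordering::Relaxed) == self.generation
    }
}

fn main() {
    let state = State::default();
    let handle = state.leak();
    assert!(handle.is_valid());
    state.mutate();
    assert!(!handle.is_valid()); // the leaked reference is now refused
}
```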
 
-// &PySharedState can be Send because any access to inner cells is
-// synchronized by the GIL.
-unsafe impl Sync for PySharedState {}
-
 impl PySharedState {
     fn borrow_mut<'a, T>(
         &'a self,
         py: Python<'a>,
         pyrefmut: RefMut<'a, T>,
-    ) -> PyResult<PyRefMut<'a, T>> {
-        if self.mutably_borrowed.get() {
-            return Err(AlreadyBorrowed::new(
-                py,
-                "Cannot borrow mutably while there exists another \
-                 mutable reference in a Python object",
-            ));
-        }
-        match self.leak_count.get() {
+    ) -> PyResult<RefMut<'a, T>> {
+        match self.current_borrow_count(py) {
             0 => {
-                self.mutably_borrowed.replace(true);
-                Ok(PyRefMut::new(py, pyrefmut, self))
+                // Note that this wraps around to the same value if mutably
+                // borrowed more than usize::MAX times, which wouldn't happen
+                // in practice.
+                self.generation.fetch_add(1, Ordering::Relaxed);
+                Ok(pyrefmut)
             }
-            // TODO
-            // For now, this works differently than Python references
-            // in the case of iterators.
-            // Python does not complain when the data an iterator
-            // points to is modified if the iterator is never used
-            // afterwards.
-            // Here, we are stricter than this by refusing to give a
-            // mutable reference if it is already borrowed.
-            // While the additional safety might be argued for, it
-            // breaks valid programming patterns in Python and we need
-            // to fix this issue down the line.
             _ => Err(AlreadyBorrowed::new(
                 py,
-                "Cannot borrow mutably while there are \
-                 immutable references in Python objects",
+                "Cannot borrow mutably while immutably borrowed",
             )),
         }
     }
@@ -84,41 +87,60 @@
     /// extended. Do not call this function directly.
     unsafe fn leak_immutable<T>(
         &self,
-        py: Python,
-        data: &PySharedRefCell<T>,
-    ) -> PyResult<(&'static T, &'static PySharedState)> {
-        if self.mutably_borrowed.get() {
-            return Err(AlreadyBorrowed::new(
-                py,
-                "Cannot borrow immutably while there is a \
-                 mutable reference in Python objects",
-            ));
-        }
-        // TODO: it's weird that self is data.py_shared_state. Maybe we
-        // can move stuff to PySharedRefCell?
-        let ptr = data.as_ptr();
-        let state_ptr: *const PySharedState = &data.py_shared_state;
-        self.leak_count.replace(self.leak_count.get() + 1);
-        Ok((&*ptr, &*state_ptr))
+        _py: Python,
+        data: Ref<T>,
+    ) -> (&'static T, &'static PySharedState) {
+        let ptr: *const T = &*data;
+        let state_ptr: *const PySharedState = self;
+        (&*ptr, &*state_ptr)
+    }
+
+    fn current_borrow_count(&self, _py: Python) -> usize {
+        self.borrow_count.load(Ordering::Relaxed)
+    }
+
+    fn increase_borrow_count(&self, _py: Python) {
+        // Note that this wraps around if there are more than usize::MAX
+        // borrowed references, which shouldn't happen due to memory limit.
+        self.borrow_count.fetch_add(1, Ordering::Relaxed);
+    }
+
+    fn decrease_borrow_count(&self, _py: Python) {
+        let prev_count = self.borrow_count.fetch_sub(1, Ordering::Relaxed);
+        assert!(prev_count > 0);
     }
 
-    /// # Safety
-    ///
-    /// It's up to you to make sure the reference is about to be deleted
-    /// when updating the leak count.
-    fn decrease_leak_count(&self, _py: Python, mutable: bool) {
-        if mutable {
-            assert_eq!(self.leak_count.get(), 0);
-            assert!(self.mutably_borrowed.get());
-            self.mutably_borrowed.replace(false);
-        } else {
-            let count = self.leak_count.get();
-            assert!(count > 0);
-            self.leak_count.replace(count - 1);
+    fn current_generation(&self, _py: Python) -> usize {
+        self.generation.load(Ordering::Relaxed)
+    }
+}
+
+/// Helper to keep the borrow count updated while the shared object is
+/// immutably borrowed without using the `RefCell` interface.
+struct BorrowPyShared<'a> {
+    py: Python<'a>,
+    py_shared_state: &'a PySharedState,
+}
+
+impl<'a> BorrowPyShared<'a> {
+    fn new(
+        py: Python<'a>,
+        py_shared_state: &'a PySharedState,
+    ) -> BorrowPyShared<'a> {
+        py_shared_state.increase_borrow_count(py);
+        BorrowPyShared {
+            py,
+            py_shared_state,
         }
     }
 }
 
+impl Drop for BorrowPyShared<'_> {
+    fn drop(&mut self) {
+        self.py_shared_state.decrease_borrow_count(self.py);
+    }
+}
+
 /// `RefCell` wrapper to be safely used in conjunction with `PySharedState`.
 ///
 /// This object can be stored in a `py_class!` object as a data field. Any
@@ -144,15 +166,11 @@
         self.inner.borrow()
     }
 
-    fn as_ptr(&self) -> *mut T {
-        self.inner.as_ptr()
-    }
-
     // TODO: maybe this should be named try_borrow_mut(), and use
     // inner.try_borrow_mut(). The current implementation panics if
     // self.inner has been borrowed, but returns error if py_shared_state
     // refuses to borrow.
-    fn borrow_mut<'a>(&'a self, py: Python<'a>) -> PyResult<PyRefMut<'a, T>> {
+    fn borrow_mut<'a>(&'a self, py: Python<'a>) -> PyResult<RefMut<'a, T>> {
         self.py_shared_state.borrow_mut(py, self.inner.borrow_mut())
     }
 }
@@ -181,78 +199,31 @@
         self.data.borrow(self.py)
     }
 
-    pub fn borrow_mut(&self) -> PyResult<PyRefMut<'a, T>> {
+    pub fn borrow_mut(&self) -> PyResult<RefMut<'a, T>> {
         self.data.borrow_mut(self.py)
     }
 
     /// Returns a leaked reference.
-    pub fn leak_immutable(&self) -> PyResult<PyLeakedRef<&'static T>> {
+    ///
+    /// # Panics
+    ///
+    /// Panics if this is mutably borrowed.
+    pub fn leak_immutable(&self) -> PyLeaked<&'static T> {
         let state = &self.data.py_shared_state;
+        // make sure self.data isn't mutably borrowed; otherwise the
+        // generation number can't be trusted.
+        let data_ref = self.borrow();
         unsafe {
             let (static_ref, static_state_ref) =
-                state.leak_immutable(self.py, self.data)?;
-            Ok(PyLeakedRef::new(
-                self.py,
-                self.owner,
-                static_ref,
-                static_state_ref,
-            ))
+                state.leak_immutable(self.py, data_ref);
+            PyLeaked::new(self.py, self.owner, static_ref, static_state_ref)
         }
     }
 }
 
-/// Holds a mutable reference to data shared between Python and Rust.
-pub struct PyRefMut<'a, T> {
-    py: Python<'a>,
-    inner: RefMut<'a, T>,
-    py_shared_state: &'a PySharedState,
-}
-
-impl<'a, T> PyRefMut<'a, T> {
-    // Must be constructed by PySharedState after checking its leak_count.
-    // Otherwise, drop() would incorrectly update the state.
-    fn new(
-        py: Python<'a>,
-        inner: RefMut<'a, T>,
-        py_shared_state: &'a PySharedState,
-    ) -> Self {
-        Self {
-            py,
-            inner,
-            py_shared_state,
-        }
-    }
-}
-
-impl<'a, T> std::ops::Deref for PyRefMut<'a, T> {
-    type Target = RefMut<'a, T>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.inner
-    }
-}
-impl<'a, T> std::ops::DerefMut for PyRefMut<'a, T> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.inner
-    }
-}
-
-impl<'a, T> Drop for PyRefMut<'a, T> {
-    fn drop(&mut self) {
-        self.py_shared_state.decrease_leak_count(self.py, true);
-    }
-}
-
 /// Allows a `py_class!` generated struct to share references to one of its
 /// data members with Python.
 ///
-/// # Warning
-///
-/// TODO allow Python container types: for now, integration with the garbage
-///     collector does not extend to Rust structs holding references to Python
-///     objects. Should the need surface, `__traverse__` and `__clear__` will
-///     need to be written as per the `rust-cpython` docs on GC integration.
-///
 /// # Parameters
 ///
 /// * `$name` is the same identifier used in for `py_class!` macro call.
@@ -307,16 +278,22 @@
 }
 
 /// Manage immutable references to `PyObject` leaked into Python iterators.
-pub struct PyLeakedRef<T> {
+///
+/// This reference will be invalidated once the original value is mutably
+/// borrowed.
+pub struct PyLeaked<T> {
     inner: PyObject,
     data: Option<T>,
     py_shared_state: &'static PySharedState,
+    /// Generation counter of data `T` captured when PyLeaked is created.
+    generation: usize,
 }
 
-// DO NOT implement Deref for PyLeakedRef<T>! Dereferencing PyLeakedRef
-// without taking Python GIL wouldn't be safe.
+// DO NOT implement Deref for PyLeaked<T>! Dereferencing PyLeaked
+// without taking the Python GIL wouldn't be safe. Also, the underlying reference
+// is invalid if generation != py_shared_state.generation.
 
-impl<T> PyLeakedRef<T> {
+impl<T> PyLeaked<T> {
     /// # Safety
     ///
     /// The `py_shared_state` must be owned by the `inner` Python object.
@@ -330,20 +307,39 @@
             inner: inner.clone_ref(py),
             data: Some(data),
             py_shared_state,
+            generation: py_shared_state.current_generation(py),
         }
     }
 
-    /// Returns an immutable reference to the inner value.
-    pub fn get_ref<'a>(&'a self, _py: Python<'a>) -> &'a T {
-        self.data.as_ref().unwrap()
+    /// Immutably borrows the wrapped value.
+    ///
+    /// Borrowing fails if the underlying reference has been invalidated.
+    pub fn try_borrow<'a>(
+        &'a self,
+        py: Python<'a>,
+    ) -> PyResult<PyLeakedRef<'a, T>> {
+        self.validate_generation(py)?;
+        Ok(PyLeakedRef {
+            _borrow: BorrowPyShared::new(py, self.py_shared_state),
+            data: self.data.as_ref().unwrap(),
+        })
     }
 
-    /// Returns a mutable reference to the inner value.
+    /// Mutably borrows the wrapped value.
+    ///
+    /// Borrowing fails if the underlying reference has been invalidated.
     ///
     /// Typically `T` is an iterator. If `T` is an immutable reference,
     /// `try_borrow_mut()` is useless since the inner value can't be mutated.
-    pub fn get_mut<'a>(&'a mut self, _py: Python<'a>) -> &'a mut T {
-        self.data.as_mut().unwrap()
+    pub fn try_borrow_mut<'a>(
+        &'a mut self,
+        py: Python<'a>,
+    ) -> PyResult<PyLeakedRefMut<'a, T>> {
+        self.validate_generation(py)?;
+        Ok(PyLeakedRefMut {
+            _borrow: BorrowPyShared::new(py, self.py_shared_state),
+            data: self.data.as_mut().unwrap(),
+        })
     }
 
     /// Converts the inner value by the given function.
@@ -351,41 +347,85 @@
     /// Typically `T` is a static reference to a container, and `U` is an
     /// iterator of that container.
     ///
+    /// # Panics
+    ///
+    /// Panics if the underlying reference has been invalidated.
+    ///
+    /// This is typically called immediately after the `PyLeaked` is obtained,
+    /// in which case the reference is still valid and no panic can occur.
+    ///
     /// # Safety
     ///
     /// The lifetime of the object passed in to the function `f` is cheated.
     /// It's typically a static reference, but is valid only while the
-    /// corresponding `PyLeakedRef` is alive. Do not copy it out of the
+    /// corresponding `PyLeaked` is alive. Do not copy it out of the
     /// function call.
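+    ///
+    /// A typical (hedged) use, mirroring the tests below; `owner` and
+    /// `string_shared()` are assumptions of this sketch:
+    ///
+    /// ```text
+    /// let leaked = owner.string_shared(py).leak_immutable();
+    /// let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
+    /// let mut iter = leaked_iter.try_borrow_mut(py)?;
+    /// assert_eq!(iter.next(), Some('n'));
+    /// ```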
     pub unsafe fn map<U>(
         mut self,
         py: Python,
         f: impl FnOnce(T) -> U,
-    ) -> PyLeakedRef<U> {
+    ) -> PyLeaked<U> {
+        // We need to check the generation value to make sure the self.data
+        // reference is still intact.
+        self.validate_generation(py)
+            .expect("map() over invalidated leaked reference");
+
-        // f() could make the self.data outlive. That's why map() is unsafe.
+        // f() could let the data in self.data outlive its owner. That's why
+        // map() is unsafe.
         // In order to make this function safe, maybe we'll need a way to
         // temporarily restrict the lifetime of self.data and translate the
         // returned object back to Something<'static>.
         let new_data = f(self.data.take().unwrap());
-        PyLeakedRef {
+        PyLeaked {
             inner: self.inner.clone_ref(py),
             data: Some(new_data),
             py_shared_state: self.py_shared_state,
+            generation: self.generation,
+        }
+    }
+
+    fn validate_generation(&self, py: Python) -> PyResult<()> {
+        if self.py_shared_state.current_generation(py) == self.generation {
+            Ok(())
+        } else {
+            Err(PyErr::new::<exc::RuntimeError, _>(
+                py,
+                "Cannot access to leaked reference after mutation",
+            ))
         }
     }
 }
 
-impl<T> Drop for PyLeakedRef<T> {
-    fn drop(&mut self) {
-        // py_shared_state should be alive since we do have
-        // a Python reference to the owner object. Taking GIL makes
-        // sure that the state is only accessed by this thread.
-        let gil = Python::acquire_gil();
-        let py = gil.python();
-        if self.data.is_none() {
-            return; // moved to another PyLeakedRef
-        }
-        self.py_shared_state.decrease_leak_count(py, false);
+/// Immutably borrowed reference to a leaked value.
+pub struct PyLeakedRef<'a, T> {
+    _borrow: BorrowPyShared<'a>,
+    data: &'a T,
+}
+
+impl<T> Deref for PyLeakedRef<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        self.data
+    }
+}
+
+/// Mutably borrowed reference to a leaked value.
+pub struct PyLeakedRefMut<'a, T> {
+    _borrow: BorrowPyShared<'a>,
+    data: &'a mut T,
+}
+
+impl<T> Deref for PyLeakedRefMut<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        self.data
+    }
+}
+
+impl<T> DerefMut for PyLeakedRefMut<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        self.data
     }
 }
 
@@ -414,7 +454,7 @@
 ///     data inner: PySharedRefCell<MyStruct>;
 ///
 ///     def __iter__(&self) -> PyResult<MyTypeItemsIterator> {
-///         let leaked_ref = self.inner_shared(py).leak_immutable()?;
+///         let leaked_ref = self.inner_shared(py).leak_immutable();
 ///         MyTypeItemsIterator::from_inner(
 ///             py,
 ///             unsafe { leaked_ref.map(py, |o| o.iter()) },
@@ -439,7 +479,7 @@
 ///
 /// py_shared_iterator!(
 ///     MyTypeItemsIterator,
-///     PyLeakedRef<HashMap<'static, Vec<u8>, Vec<u8>>>,
+///     PyLeaked<HashMap<'static, Vec<u8>, Vec<u8>>>,
 ///     MyType::translate_key_value,
 ///     Option<(PyBytes, PyBytes)>
 /// );
@@ -452,23 +492,14 @@
         $success_type: ty
     ) => {
         py_class!(pub class $name |py| {
-            data inner: RefCell<Option<$leaked>>;
+            data inner: RefCell<$leaked>;
 
             def __next__(&self) -> PyResult<$success_type> {
-                let mut inner_opt = self.inner(py).borrow_mut();
-                if let Some(leaked) = inner_opt.as_mut() {
-                    match leaked.get_mut(py).next() {
-                        None => {
-                            // replace Some(inner) by None, drop $leaked
-                            inner_opt.take();
-                            Ok(None)
-                        }
-                        Some(res) => {
-                            $success_func(py, res)
-                        }
-                    }
-                } else {
-                    Ok(None)
+                let mut leaked = self.inner(py).borrow_mut();
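+                // try_borrow_mut() fails once the owner has been mutably
+                // borrowed again; the resulting RuntimeError propagates to
+                // the Python caller through `?`.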
+                let mut iter = leaked.try_borrow_mut(py)?;
+                match iter.next() {
+                    None => Ok(None),
+                    Some(res) => $success_func(py, res),
                 }
             }
 
@@ -484,7 +515,7 @@
             ) -> PyResult<Self> {
                 Self::create_instance(
                     py,
-                    RefCell::new(Some(leaked)),
+                    RefCell::new(leaked),
                 )
             }
         }
@@ -512,12 +543,94 @@
     }
 
     #[test]
-    fn test_borrow_mut_while_leaked() {
+    fn test_leaked_borrow() {
+        let (gil, owner) = prepare_env();
+        let py = gil.python();
+        let leaked = owner.string_shared(py).leak_immutable();
+        let leaked_ref = leaked.try_borrow(py).unwrap();
+        assert_eq!(*leaked_ref, "new");
+    }
+
+    #[test]
+    fn test_leaked_borrow_mut() {
+        let (gil, owner) = prepare_env();
+        let py = gil.python();
+        let leaked = owner.string_shared(py).leak_immutable();
+        let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
+        let mut leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
+        assert_eq!(leaked_ref.next(), Some('n'));
+        assert_eq!(leaked_ref.next(), Some('e'));
+        assert_eq!(leaked_ref.next(), Some('w'));
+        assert_eq!(leaked_ref.next(), None);
+    }
+
+    #[test]
+    fn test_leaked_borrow_after_mut() {
+        let (gil, owner) = prepare_env();
+        let py = gil.python();
+        let leaked = owner.string_shared(py).leak_immutable();
+        owner.string_shared(py).borrow_mut().unwrap().clear();
+        assert!(leaked.try_borrow(py).is_err());
+    }
+
+    #[test]
+    fn test_leaked_borrow_mut_after_mut() {
+        let (gil, owner) = prepare_env();
+        let py = gil.python();
+        let leaked = owner.string_shared(py).leak_immutable();
+        let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
+        owner.string_shared(py).borrow_mut().unwrap().clear();
+        assert!(leaked_iter.try_borrow_mut(py).is_err());
+    }
+
+    #[test]
+    #[should_panic(expected = "map() over invalidated leaked reference")]
+    fn test_leaked_map_after_mut() {
+        let (gil, owner) = prepare_env();
+        let py = gil.python();
+        let leaked = owner.string_shared(py).leak_immutable();
+        owner.string_shared(py).borrow_mut().unwrap().clear();
+        let _leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
+    }
+
+    #[test]
+    fn test_borrow_mut_while_leaked_ref() {
         let (gil, owner) = prepare_env();
         let py = gil.python();
         assert!(owner.string_shared(py).borrow_mut().is_ok());
-        let _leaked = owner.string_shared(py).leak_immutable().unwrap();
-        // TODO: will be allowed
-        assert!(owner.string_shared(py).borrow_mut().is_err());
+        let leaked = owner.string_shared(py).leak_immutable();
+        {
+            let _leaked_ref = leaked.try_borrow(py).unwrap();
+            assert!(owner.string_shared(py).borrow_mut().is_err());
+            {
+                let _leaked_ref2 = leaked.try_borrow(py).unwrap();
+                assert!(owner.string_shared(py).borrow_mut().is_err());
+            }
+            assert!(owner.string_shared(py).borrow_mut().is_err());
+        }
+        assert!(owner.string_shared(py).borrow_mut().is_ok());
+    }
+
+    #[test]
+    fn test_borrow_mut_while_leaked_ref_mut() {
+        let (gil, owner) = prepare_env();
+        let py = gil.python();
+        assert!(owner.string_shared(py).borrow_mut().is_ok());
+        let leaked = owner.string_shared(py).leak_immutable();
+        let mut leaked_iter = unsafe { leaked.map(py, |s| s.chars()) };
+        {
+            let _leaked_ref = leaked_iter.try_borrow_mut(py).unwrap();
+            assert!(owner.string_shared(py).borrow_mut().is_err());
+        }
+        assert!(owner.string_shared(py).borrow_mut().is_ok());
+    }
+
+    #[test]
+    #[should_panic(expected = "mutably borrowed")]
+    fn test_leak_while_borrow_mut() {
+        let (gil, owner) = prepare_env();
+        let py = gil.python();
+        let _mut_ref = owner.string_shared(py).borrow_mut();
+        owner.string_shared(py).leak_immutable();
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/revlog.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,235 @@
+// revlog.rs
+//
+// Copyright 2019 Georges Racinet <georges.racinet@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use crate::cindex;
+use cpython::{
+    ObjectProtocol, PyClone, PyDict, PyModule, PyObject, PyResult, PyTuple,
+    Python, PythonObject, ToPyObject,
+};
+use hg::Revision;
+use std::cell::RefCell;
+
+/// Return a struct implementing the `Graph` trait
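+///
+/// If `index` is already a Rust-backed `MixedIndex`, its C index is reused
+/// directly; otherwise a new wrapper around the Python object is built.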
+pub(crate) fn pyindex_to_graph(
+    py: Python,
+    index: PyObject,
+) -> PyResult<cindex::Index> {
+    match index.extract::<MixedIndex>(py) {
+        Ok(midx) => Ok(midx.clone_cindex(py)),
+        Err(_) => cindex::Index::new(py, index),
+    }
+}
+
+py_class!(pub class MixedIndex |py| {
+    data cindex: RefCell<cindex::Index>;
+
+    def __new__(_cls, cindex: PyObject) -> PyResult<MixedIndex> {
+        Self::create_instance(py, RefCell::new(
+            cindex::Index::new(py, cindex)?))
+    }
+
+    /// Compatibility layer used for Python consumers needing access to the C index
+    ///
+    /// The only use case so far is `scmutil.shortesthexnodeidprefix`,
+    /// which may need to build a custom `nodetree` based on a specified revset.
+    /// With a Rust implementation of the nodemap, we will be able to get rid of
+    /// this by exposing our own standalone nodemap class,
+    /// ready to accept `MixedIndex`.
+    def get_cindex(&self) -> PyResult<PyObject> {
+        Ok(self.cindex(py).borrow().inner().clone_ref(py))
+    }
+
+    // Reforwarded C index API
+
+    // index_methods (tp_methods). Same ordering as in revlog.c
+
+    /// return the gca set of the given revs
+    def ancestors(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "ancestors", args, kw)
+    }
+
+    /// return the heads of the common ancestors of the given revs
+    def commonancestorsheads(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "commonancestorsheads", args, kw)
+    }
+
+    /// clear the index caches
+    def clearcaches(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "clearcaches", args, kw)
+    }
+
+    /// get an index entry
+    def get(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "get", args, kw)
+    }
+
+    /// return `rev` associated with a node or None
+    def get_rev(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "get_rev", args, kw)
+    }
+
+    /// return True if the node exists in the index
+    def has_node(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "has_node", args, kw)
+    }
+
+    /// return `rev` associated with a node or raise RevlogError
+    def rev(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "rev", args, kw)
+    }
+
+    /// compute phases
+    def computephasesmapsets(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "computephasesmapsets", args, kw)
+    }
+
+    /// reachableroots
+    def reachableroots2(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "reachableroots2", args, kw)
+    }
+
+    /// get head revisions
+    def headrevs(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "headrevs", args, kw)
+    }
+
+    /// get filtered head revisions
+    def headrevsfiltered(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "headrevsfiltered", args, kw)
+    }
+
+    /// True if the object is a snapshot
+    def issnapshot(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "issnapshot", args, kw)
+    }
+
+    /// Gather snapshot data in a cache dict
+    def findsnapshots(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "findsnapshots", args, kw)
+    }
+
+    /// determine revisions with deltas to reconstruct fulltext
+    def deltachain(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "deltachain", args, kw)
+    }
+
+    /// slice planned chunk read to reach a density threshold
+    def slicechunktodensity(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "slicechunktodensity", args, kw)
+    }
+
+    /// append an index entry
+    def append(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "append", args, kw)
+    }
+
+    /// match a potentially ambiguous node ID
+    def partialmatch(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "partialmatch", args, kw)
+    }
+
+    /// find length of shortest hex nodeid of a binary ID
+    def shortest(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "shortest", args, kw)
+    }
+
+    /// stats for the index
+    def stats(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "stats", args, kw)
+    }
+
+    // index_sequence_methods and index_mapping_methods.
+    //
+    // Since we call back through the high level Python API,
+    // there's no point making a distinction between index_get
+    // and index_getitem.
+
+    def __len__(&self) -> PyResult<usize> {
+        self.cindex(py).borrow().inner().len(py)
+    }
+
+    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
+        // this conversion seems needless, but that's actually because
+        // `index_getitem` does not handle conversion from PyLong,
+        // which expressions such as [e for e in index] internally use.
+        // Note that we don't seem to have a direct way to call
+        // PySequence_GetItem (which does the job), and that would be
+        // better for performance.
+        let key = match key.extract::<Revision>(py) {
+            Ok(rev) => rev.to_py_object(py).into_object(),
+            Err(_) => key,
+        };
+        self.cindex(py).borrow().inner().get_item(py, key)
+    }
+
+    def __setitem__(&self, key: PyObject, value: PyObject) -> PyResult<()> {
+        self.cindex(py).borrow().inner().set_item(py, key, value)
+    }
+
+    def __delitem__(&self, key: PyObject) -> PyResult<()> {
+        self.cindex(py).borrow().inner().del_item(py, key)
+    }
+
+    def __contains__(&self, item: PyObject) -> PyResult<bool> {
+        // ObjectProtocol does not seem to provide contains(), so
+        // this is an equivalent implementation of the index_contains()
+        // defined in revlog.c
+        let cindex = self.cindex(py).borrow();
+        match item.extract::<Revision>(py) {
+            Ok(rev) => {
+                Ok(rev >= -1 && rev < cindex.inner().len(py)? as Revision)
+            }
+            Err(_) => {
+                cindex.inner().call_method(
+                    py,
+                    "has_node",
+                    PyTuple::new(py, &[item]),
+                    None)?
+                .extract(py)
+            }
+        }
+    }
+
+});
+
+impl MixedIndex {
+    /// forward a method call to the underlying C index
+    fn call_cindex(
+        &self,
+        py: Python,
+        name: &str,
+        args: &PyTuple,
+        kwargs: Option<&PyDict>,
+    ) -> PyResult<PyObject> {
+        self.cindex(py)
+            .borrow()
+            .inner()
+            .call_method(py, name, args, kwargs)
+    }
+
+    pub fn clone_cindex(&self, py: Python) -> cindex::Index {
+        self.cindex(py).borrow().clone_ref(py)
+    }
+}
+
+/// Create the module, with __package__ given from parent
+pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
+    let dotted_name = &format!("{}.revlog", package);
+    let m = PyModule::new(py, dotted_name)?;
+    m.add(py, "__package__", package)?;
+    m.add(py, "__doc__", "RevLog - Rust implementations")?;
+
+    m.add_class::<MixedIndex>(py)?;
+
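+    // Register the module under its dotted name so that
+    // `import <package>.revlog` works; creating a PyModule does not add it
+    // to sys.modules by itself.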
+    let sys = PyModule::import(py, "sys")?;
+    let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
+    sys_modules.set_item(py, dotted_name, &m)?;
+
+    Ok(m)
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hgcli/README.rst	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,58 @@
+Features
+--------
+
+The following Cargo features are available:
+
+localdev (default)
+   Produce files that work with an in-source-tree build.
+
+   In this mode, the build finds and uses a ``python2.7`` binary from
+   ``PATH``. The ``hg`` binary assumes it runs from ``rust/target/<target>/hg``
+   and it finds Mercurial files at ``dirname($0)/../../../``.
+
+Build Mechanism
+---------------
+
+The produced ``hg`` binary is *bound* to a CPython installation. The
+binary links against and loads a CPython library that is discovered
+at build time (by a ``build.rs`` Cargo build script). The Python
+standard library defined by this CPython installation is also used.
+
+Finding the appropriate CPython installation to use is done by
+the ``python27-sys`` crate's ``build.rs``. Its search order is:
+
+1. ``PYTHON_SYS_EXECUTABLE`` environment variable
+2. ``python`` executable on ``PATH``
+3. ``python2`` executable on ``PATH``
+4. ``python2.7`` executable on ``PATH``
+
+Additional verification of the found Python will be performed by our
+``build.rs`` to ensure it meets Mercurial's requirements.
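+
+For example, to pin the build to a specific interpreter (an illustrative
+invocation; any of the lookups above would also work)::
+
+   $ PYTHON_SYS_EXECUTABLE=/usr/bin/python2.7 cargo build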
+
+Details about the build-time configured Python are built into the
+produced ``hg`` binary. This means that a built ``hg`` binary is only
+suitable for a specific, well-defined role. These roles are controlled
+by Cargo features (see above).
+
+Running
+=======
+
+The ``hgcli`` crate produces an ``hg`` binary. You can run this binary
+via ``cargo run``::
+
+   $ cargo run --manifest-path hgcli/Cargo.toml
+
+Or directly::
+
+   $ target/debug/hg
+   $ target/release/hg
+
+You can also run the test harness with this binary::
+
+   $ ./run-tests.py --with-hg ../rust/target/debug/hg
+
+.. note::
+
+   Integration with the test harness is still preliminary. Remember to
+   ``cargo build`` after changes because the test harness doesn't yet
+   automatically build Rust code.
--- a/rust/hgcli/build.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hgcli/build.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -18,9 +18,8 @@
 fn get_python_config() -> PythonConfig {
     // The python27-sys crate exports a Cargo variable defining the full
     // path to the interpreter being used.
-    let python = env::var("DEP_PYTHON27_PYTHON_INTERPRETER").expect(
-        "Missing DEP_PYTHON27_PYTHON_INTERPRETER; bad python27-sys crate?",
-    );
+    let python = env::var("DEP_PYTHON27_PYTHON_INTERPRETER")
+        .expect("Missing DEP_PYTHON27_PYTHON_INTERPRETER; bad python27-sys crate?");
 
     if !Path::new(&python).exists() {
         panic!(
@@ -33,8 +32,8 @@
     let separator = "SEPARATOR STRING";
 
     let script = "import sysconfig; \
-c = sysconfig.get_config_vars(); \
-print('SEPARATOR STRING'.join('%s=%s' % i for i in c.items()))";
+                  c = sysconfig.get_config_vars(); \
+                  print('SEPARATOR STRING'.join('%s=%s' % i for i in c.items()))";
 
     let mut command = Command::new(&python);
     command.arg("-c").arg(script);
--- a/rust/hgcli/src/main.rs	Thu Jan 09 14:19:20 2020 -0500
+++ b/rust/hgcli/src/main.rs	Tue Jan 21 13:14:51 2020 -0500
@@ -5,18 +5,18 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
+extern crate cpython;
 extern crate libc;
-extern crate cpython;
 extern crate python27_sys;
 
 use cpython::{NoArgs, ObjectProtocol, PyModule, PyResult, Python};
 use libc::{c_char, c_int};
 
 use std::env;
-use std::path::PathBuf;
 use std::ffi::{CString, OsStr};
 #[cfg(target_family = "unix")]
 use std::os::unix::ffi::{OsStrExt, OsStringExt};
+use std::path::PathBuf;
 
 #[derive(Debug)]
 struct Environment {
--- a/setup.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/setup.py	Tue Jan 21 13:14:51 2020 -0500
@@ -713,36 +713,40 @@
             self.compiler.compiler_so = self.compiler.compiler  # no -mdll
             self.compiler.dll_libraries = []  # no -lmsrvc90
 
-        # Different Python installs can have different Python library
-        # names. e.g. the official CPython distribution uses pythonXY.dll
-        # and MinGW uses libpythonX.Y.dll.
-        _kernel32 = ctypes.windll.kernel32
-        _kernel32.GetModuleFileNameA.argtypes = [
-            ctypes.c_void_p,
-            ctypes.c_void_p,
-            ctypes.c_ulong,
-        ]
-        _kernel32.GetModuleFileNameA.restype = ctypes.c_ulong
-        size = 1000
-        buf = ctypes.create_string_buffer(size + 1)
-        filelen = _kernel32.GetModuleFileNameA(
-            sys.dllhandle, ctypes.byref(buf), size
-        )
+        pythonlib = None
 
-        if filelen > 0 and filelen != size:
-            dllbasename = os.path.basename(buf.value)
-            if not dllbasename.lower().endswith(b'.dll'):
-                raise SystemExit(
-                    'Python DLL does not end with .dll: %s' % dllbasename
-                )
-            pythonlib = dllbasename[:-4]
-        else:
+        if getattr(sys, 'dllhandle', None):
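+            # sys.dllhandle only exists on Windows builds of CPython, so
+            # elsewhere this block is skipped and pythonlib stays None.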
+            # Different Python installs can have different Python library
+            # names. e.g. the official CPython distribution uses pythonXY.dll
+            # and MinGW uses libpythonX.Y.dll.
+            _kernel32 = ctypes.windll.kernel32
+            _kernel32.GetModuleFileNameA.argtypes = [
+                ctypes.c_void_p,
+                ctypes.c_void_p,
+                ctypes.c_ulong,
+            ]
+            _kernel32.GetModuleFileNameA.restype = ctypes.c_ulong
+            size = 1000
+            buf = ctypes.create_string_buffer(size + 1)
+            filelen = _kernel32.GetModuleFileNameA(
+                sys.dllhandle, ctypes.byref(buf), size
+            )
+
+            if filelen > 0 and filelen != size:
+                dllbasename = os.path.basename(buf.value)
+                if not dllbasename.lower().endswith(b'.dll'):
+                    raise SystemExit(
+                        'Python DLL does not end with .dll: %s' % dllbasename
+                    )
+                pythonlib = dllbasename[:-4]
+
+        if not pythonlib:
             log.warn(
-                'could not determine Python DLL filename; ' 'assuming pythonXY'
+                'could not determine Python DLL filename; assuming pythonXY'
             )
 
             hv = sys.hexversion
-            pythonlib = 'python%d%d' % (hv >> 24, (hv >> 16) & 0xFF)
+            pythonlib = b'python%d%d' % (hv >> 24, (hv >> 16) & 0xFF)
 
         log.info('using %s as Python library name' % pythonlib)
         with open('mercurial/hgpythonlib.h', 'wb') as f:
@@ -931,7 +935,7 @@
         # This logic is duplicated in doc/Makefile.
         sources = set(
             f
-            for f in os.listdir('mercurial/help')
+            for f in os.listdir('mercurial/helptext')
             if re.search(r'[0-9]\.txt$', f)
         )
 
@@ -1060,11 +1064,7 @@
             # absolute path instead
             libdir = self.install_lib
         else:
-            common = os.path.commonprefix((self.install_dir, self.install_lib))
-            rest = self.install_dir[len(common) :]
-            uplevel = len([n for n in os.path.split(rest) if n])
-
-            libdir = uplevel * ('..' + os.sep) + self.install_lib[len(common) :]
+            libdir = os.path.relpath(self.install_lib, self.install_dir)
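+            # e.g. relpath('/usr/lib/pythonX.Y/site-packages', '/usr/bin')
+            # gives '../lib/pythonX.Y/site-packages' (illustrative paths)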
 
         for outfile in self.outfiles:
             with open(outfile, 'rb') as fp:
@@ -1191,6 +1191,9 @@
     'mercurial',
     'mercurial.cext',
     'mercurial.cffi',
+    'mercurial.defaultrc',
+    'mercurial.helptext',
+    'mercurial.helptext.internals',
     'mercurial.hgweb',
     'mercurial.interfaces',
     'mercurial.pure',
@@ -1379,9 +1382,9 @@
 class RustEnhancedExtension(RustExtension):
     """A C Extension, conditionally enhanced with Rust code.
 
-    If the HGRUSTEXT environment variable is set to something else
-    than 'cpython', the Rust sources get compiled and linked within the
-    C target shared library object.
+    If the HGWITHRUSTEXT environment variable is set to something other
+    than 'cpython', the Rust sources get compiled and linked within
+    the C target shared library object.
     """
 
     def __init__(self, mpath, sources, rustlibname, subcrate, **kw):
@@ -1474,6 +1477,14 @@
         ],
     ),
     Extension(
+        'mercurial.thirdparty.sha1dc',
+        [
+            'mercurial/thirdparty/sha1dc/cext.c',
+            'mercurial/thirdparty/sha1dc/lib/sha1.c',
+            'mercurial/thirdparty/sha1dc/lib/ubc_check.c',
+        ],
+    ),
+    Extension(
         'hgext.fsmonitor.pywatchman.bser', ['hgext/fsmonitor/pywatchman/bser.c']
     ),
     RustStandaloneExtension(
@@ -1535,11 +1546,11 @@
 packagedata = {
     'mercurial': [
         'locale/*/LC_MESSAGES/hg.mo',
-        'help/*.txt',
-        'help/internals/*.txt',
-        'default.d/*.rc',
+        'defaultrc/*.rc',
         'dummycert.pem',
-    ]
+    ],
+    'mercurial.helptext': ['*.txt',],
+    'mercurial.helptext.internals': ['*.txt',],
 }
 
 
--- a/tests/f	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/f	Tue Jan 21 13:14:51 2020 -0500
@@ -34,14 +34,18 @@
 import sys
 
 # Python 3 adapters
-ispy3 = (sys.version_info[0] >= 3)
+ispy3 = sys.version_info[0] >= 3
 if ispy3:
+
     def iterbytes(s):
         for i in range(len(s)):
-            yield s[i:i + 1]
+            yield s[i : i + 1]
+
+
 else:
     iterbytes = iter
 
+
 def visit(opts, filenames, outfile):
     """Process filenames in the way specified in opts, writing output to
     outfile."""
@@ -88,21 +92,26 @@
             if opts.newer:
                 # mtime might be in whole seconds so newer file might be same
                 if stat.st_mtime >= os.stat(opts.newer).st_mtime:
-                    facts.append(b'newer than %s' % opts.newer.encode(
-                        'utf8', 'replace'))
+                    facts.append(
+                        b'newer than %s' % opts.newer.encode('utf8', 'replace')
+                    )
                 else:
-                    facts.append(b'older than %s' % opts.newer.encode(
-                        'utf8', 'replace'))
+                    facts.append(
+                        b'older than %s' % opts.newer.encode('utf8', 'replace')
+                    )
         if opts.md5 and content is not None:
             h = hashlib.md5(content)
-            facts.append(b'md5=%s' % binascii.hexlify(h.digest())[:opts.bytes])
+            facts.append(b'md5=%s' % binascii.hexlify(h.digest())[: opts.bytes])
         if opts.sha1 and content is not None:
             h = hashlib.sha1(content)
-            facts.append(b'sha1=%s' % binascii.hexlify(h.digest())[:opts.bytes])
+            facts.append(
+                b'sha1=%s' % binascii.hexlify(h.digest())[: opts.bytes]
+            )
         if opts.sha256 and content is not None:
             h = hashlib.sha256(content)
-            facts.append(b'sha256=%s' %
-                         binascii.hexlify(h.digest())[:opts.bytes])
+            facts.append(
+                b'sha256=%s' % binascii.hexlify(h.digest())[: opts.bytes]
+            )
         if isstdin:
             outfile.write(b', '.join(facts) + b'\n')
         elif facts:
@@ -114,21 +123,25 @@
             if not islink:
                 if opts.lines:
                     if opts.lines >= 0:
-                        chunk = b''.join(chunk.splitlines(True)[:opts.lines])
+                        chunk = b''.join(chunk.splitlines(True)[: opts.lines])
                     else:
-                        chunk = b''.join(chunk.splitlines(True)[opts.lines:])
+                        chunk = b''.join(chunk.splitlines(True)[opts.lines :])
                 if opts.bytes:
                     if opts.bytes >= 0:
-                        chunk = chunk[:opts.bytes]
+                        chunk = chunk[: opts.bytes]
                     else:
-                        chunk = chunk[opts.bytes:]
+                        chunk = chunk[opts.bytes :]
             if opts.hexdump:
                 for i in range(0, len(chunk), 16):
-                    s = chunk[i:i + 16]
-                    outfile.write(b'%04x: %-47s |%s|\n' %
-                                  (i, b' '.join(
-                                      b'%02x' % ord(c) for c in iterbytes(s)),
-                                   re.sub(b'[^ -~]', b'.', s)))
+                    s = chunk[i : i + 16]
+                    outfile.write(
+                        b'%04x: %-47s |%s|\n'
+                        % (
+                            i,
+                            b' '.join(b'%02x' % ord(c) for c in iterbytes(s)),
+                            re.sub(b'[^ -~]', b'.', s),
+                        )
+                    )
             if opts.dump:
                 if not quiet:
                     outfile.write(b'>>>\n')
@@ -142,36 +155,60 @@
             assert not isstdin
             visit(opts, dirfiles, outfile)
 
+
 if __name__ == "__main__":
     parser = optparse.OptionParser("%prog [options] [filenames]")
-    parser.add_option("-t", "--type", action="store_true",
-                      help="show file type (file or directory)")
-    parser.add_option("-m", "--mode", action="store_true",
-                      help="show file mode")
-    parser.add_option("-l", "--links", action="store_true",
-                      help="show number of links")
-    parser.add_option("-s", "--size", action="store_true",
-                      help="show size of file")
-    parser.add_option("-n", "--newer", action="store",
-                      help="check if file is newer (or same)")
-    parser.add_option("-r", "--recurse", action="store_true",
-                      help="recurse into directories")
-    parser.add_option("-S", "--sha1", action="store_true",
-                      help="show sha1 hash of the content")
-    parser.add_option("", "--sha256", action="store_true",
-                      help="show sha256 hash of the content")
-    parser.add_option("-M", "--md5", action="store_true",
-                      help="show md5 hash of the content")
-    parser.add_option("-D", "--dump", action="store_true",
-                      help="dump file content")
-    parser.add_option("-H", "--hexdump", action="store_true",
-                      help="hexdump file content")
-    parser.add_option("-B", "--bytes", type="int",
-                      help="number of characters to dump")
-    parser.add_option("-L", "--lines", type="int",
-                      help="number of lines to dump")
-    parser.add_option("-q", "--quiet", action="store_true",
-                      help="no default output")
+    parser.add_option(
+        "-t",
+        "--type",
+        action="store_true",
+        help="show file type (file or directory)",
+    )
+    parser.add_option(
+        "-m", "--mode", action="store_true", help="show file mode"
+    )
+    parser.add_option(
+        "-l", "--links", action="store_true", help="show number of links"
+    )
+    parser.add_option(
+        "-s", "--size", action="store_true", help="show size of file"
+    )
+    parser.add_option(
+        "-n", "--newer", action="store", help="check if file is newer (or same)"
+    )
+    parser.add_option(
+        "-r", "--recurse", action="store_true", help="recurse into directories"
+    )
+    parser.add_option(
+        "-S",
+        "--sha1",
+        action="store_true",
+        help="show sha1 hash of the content",
+    )
+    parser.add_option(
+        "",
+        "--sha256",
+        action="store_true",
+        help="show sha256 hash of the content",
+    )
+    parser.add_option(
+        "-M", "--md5", action="store_true", help="show md5 hash of the content"
+    )
+    parser.add_option(
+        "-D", "--dump", action="store_true", help="dump file content"
+    )
+    parser.add_option(
+        "-H", "--hexdump", action="store_true", help="hexdump file content"
+    )
+    parser.add_option(
+        "-B", "--bytes", type="int", help="number of characters to dump"
+    )
+    parser.add_option(
+        "-L", "--lines", type="int", help="number of lines to dump"
+    )
+    parser.add_option(
+        "-q", "--quiet", action="store_true", help="no default output"
+    )
     (opts, filenames) = parser.parse_args(sys.argv[1:])
     if not filenames:
         filenames = ['-']
--- a/tests/fakedirstatewritetime.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/fakedirstatewritetime.py	Tue Jan 21 13:14:51 2020 -0500
@@ -30,8 +30,8 @@
     b'fakedirstatewritetime', b'fakenow', default=None,
 )
 
-parsers = policy.importmod(r'parsers')
-rustmod = policy.importrust(r'parsers')
+parsers = policy.importmod('parsers')
+rustmod = policy.importrust('parsers')
 
 
 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
--- a/tests/hghave	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/hghave	Tue Jan 21 13:14:51 2020 -0500
@@ -13,11 +13,13 @@
 
 checks = hghave.checks
 
+
 def list_features():
     for name, feature in sorted(checks.items()):
         desc = feature[1]
         print(name + ':', desc)
 
+
 def test_features():
     failed = 0
     for name, feature in checks.items():
@@ -29,11 +31,15 @@
             failed += 1
     return failed
 
+
 parser = optparse.OptionParser("%prog [options] [features]")
-parser.add_option("--test-features", action="store_true",
-                  help="test available features")
-parser.add_option("--list-features", action="store_true",
-                  help="list available features")
+parser.add_option(
+    "--test-features", action="store_true", help="test available features"
+)
+parser.add_option(
+    "--list-features", action="store_true", help="list available features"
+)
+
 
 def _loadaddon():
     if 'TESTDIR' in os.environ:
@@ -49,13 +55,16 @@
     sys.path.insert(0, path)
     try:
         import hghaveaddon
+
         assert hghaveaddon  # silence pyflakes
     except BaseException as inst:
-        sys.stderr.write('failed to import hghaveaddon.py from %r: %s\n'
-                         % (path, inst))
+        sys.stderr.write(
+            'failed to import hghaveaddon.py from %r: %s\n' % (path, inst)
+        )
         sys.exit(2)
     sys.path.pop(0)
 
+
 if __name__ == '__main__':
     options, args = parser.parse_args()
     _loadaddon()
--- a/tests/hghave.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/hghave.py	Tue Jan 21 13:14:51 2020 -0500
@@ -431,7 +431,8 @@
 
 @check("symlink", "symbolic links")
 def has_symlink():
-    if getattr(os, "symlink", None) is None:
+    # mercurial.windows.checklink() is a hard 'no' at the moment
+    if os.name == 'nt' or getattr(os, "symlink", None) is None:
         return False
     name = tempfile.mktemp(dir='.', prefix=tempprefix)
     try:
@@ -576,6 +577,22 @@
         return False
 
 
+@check("pygments25", "Pygments version >= 2.5")
+def pygments25():
+    try:
+        import pygments
+
+        v = pygments.__version__
+    except ImportError:
+        return False
+
+    parts = v.split(".")
+    major = int(parts[0])
+    minor = int(parts[1])
+
+    return (major, minor) >= (2, 5)
+
+
 @check("outer-repo", "outer repo")
 def has_outer_repo():
     # failing for other reasons than 'no repo' imply that there is a repo
@@ -672,6 +689,13 @@
         return False
 
 
+@check("xz", "xz compression utility")
+def has_xz():
+    # When Windows invokes a subprocess in shell mode, it uses `cmd.exe`, which
+    # only knows `where`, not `which`.  So invoke the MSYS shell explicitly.
+    return matchoutput("sh -c 'test -x \"`which xz`\"'", b'')
+
+
 @check("msys", "Windows with MSYS")
 def has_msys():
     return os.getenv('MSYSTEM')
@@ -999,3 +1023,19 @@
     version = matchoutput(blackcmd, version_regex)
     sv = distutils.version.StrictVersion
     return version and sv(_strpath(version.group(1))) >= sv('19.10b0')
+
+
+@check('pytype', 'the pytype type checker')
+def has_pytype():
+    pytypecmd = 'pytype --version'
+    version = matchoutput(pytypecmd, b'[0-9a-b.]+')
+    sv = distutils.version.StrictVersion
+    return version and sv(_strpath(version.group(0))) >= sv('2019.10.17')
+
+
+@check("rustfmt", "rustfmt tool")
+def has_rustfmt():
+    # We use Nightly's rustfmt due to current unstable config options.
+    return matchoutput(
+        '`rustup which --toolchain nightly rustfmt` --version', b'rustfmt'
+    )
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/accept-4564.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,141 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:31:57 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"4564\",\"phid\":\"PHID-DREV-6cgnf5fyeeqhntbxgfb7\",\"title\":\"localrepo: move some vfs initialization out of __init__\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D4564\",\"dateCreated\":\"1536856174\",\"dateModified\":\"1537307962\",\"authorPHID\":\"PHID-USER-p54bpwbifxx7sbgpx47d\",\"status\":\"3\",\"statusName\":\"Closed\",\"properties\":{\"wasAcceptedBeforeClose\":true},\"branch\":null,\"summary\":\"In order to make repository types more dynamic, we'll need to move the\\nlogic for determining repository behavior out of\\nlocalrepository.__init__ so we can influence behavior before the type\\nis instantiated.\\n\\nThis commit starts that process by moving working directory and .hg\\/\\nvfs initialization to our new standalone function for instantiating\\nlocal repositories.\\n\\nAside from API changes, behavior should be fully backwards compatible.\\n\\n.. api::\\n\\n   localrepository.__init__ now does less work and accepts new args\\n\\n   Use ``hg.repository()``, ``localrepo.instance()``, or\\n   ``localrepo.makelocalrepository()`` to obtain a new local repository\\n   instance instead of calling the ``localrepository`` constructor\\n   directly.\",\"testPlan\":\"\",\"lineCount\":\"64\",\"activeDiffPHID\":\"PHID-DIFF-7m4gug2nq4zt7jwxzqg2\",\"diffs\":[\"11162\",\"11002\"],\"commits\":[\"PHID-CMIT-xknk3j65xkoirmrpelni\"],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\",\"PHID-USER-cgcdlc6c3gpxapbmkwa2\":\"PHID-USER-cgcdlc6c3gpxapbmkwa2\"},\"ccs\":[\"PHID-USER-cgcdlc6c3gpxapbmkwa2\",\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-gqp33hnxg65vkl3xioka\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "146"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B4564%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:31:57 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":\"ERR-CONDUIT-CORE\",\"error_info\":\"Validation errors:\\n  - You can not accept this revision because it has already been closed. Only open revisions can be accepted.\"}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "402"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22objectIdentifier%22%3A+%22PHID-DREV-6cgnf5fyeeqhntbxgfb7%22%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22accept%22%2C+%22value%22%3A+true%7D%2C+%7B%22type%22%3A+%22comment%22%2C+%22value%22%3A+%22I+think+I+like+where+this+is+headed.+Will+read+rest+of+series+later.%22%7D%5D%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/accept-7913.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,141 @@
+{
+    "version": 1, 
+    "interactions": [
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"7913\",\"phid\":\"PHID-DREV-s4borg2nl7ay2mskktwq\",\"title\":\"cext: fix compiler warning about sign changing\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D7913\",\"dateCreated\":\"1579207172\",\"dateModified\":\"1579207173\",\"authorPHID\":\"PHID-USER-5iutahkpkhvnxfimqjbk\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":6,\"lines.removed\":6},\"branch\":\"default\",\"summary\":\"line.len is a Py_ssize_t, and we're casing to size_t (unsigned). On my compiler,\\nthis causes a warning to be emitted:\\n\\n```\\nmercurial\\/cext\\/manifest.c: In function 'pathlen':\\nmercurial\\/cext\\/manifest.c:48:44: warning: operand of ?: changes signedness from 'Py_ssize_t' {aka 'long int'} to 'long unsigned int' due to unsignedness of other operand [-Wsign-compare]\\n  return (end) ? (size_t)(end - l-\\u003estart) : l-\\u003elen;\\n                                            ^~~~~~\\n```\",\"testPlan\":\"\",\"lineCount\":\"12\",\"activeDiffPHID\":\"PHID-DIFF-vms2yu54d7di7r332dbs\",\"diffs\":[\"19380\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\"}],\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 01:03:23 GMT"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ]
+                }
+            }, 
+            "request": {
+                "body": "output=json&__conduit__=1&params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B7913%5D%7D", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "content-length": [
+                        "146"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+621-e7ba2449a883)"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ]
+                }, 
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query"
+            }
+        }, 
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":7913,\"phid\":\"PHID-DREV-s4borg2nl7ay2mskktwq\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-7fwthnytbq3bw2p\"},{\"phid\":\"PHID-XACT-DREV-lno5olcencqrgnh\"},{\"phid\":\"PHID-XACT-DREV-uanndmc3t3onueu\"}]},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 01:03:23 GMT"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ]
+                }
+            }, 
+            "request": {
+                "body": "output=json&__conduit__=1&params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22objectIdentifier%22%3A+%22PHID-DREV-s4borg2nl7ay2mskktwq%22%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22accept%22%2C+%22value%22%3A+true%7D%2C+%7B%22type%22%3A+%22comment%22%2C+%22value%22%3A+%22LGTM%22%7D%5D%7D", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "content-length": [
+                        "338"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+621-e7ba2449a883)"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ]
+                }, 
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit"
+            }
+        }
+    ]
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phab-conduit.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,73 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:52 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"data\":[],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/user.search", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "169"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22constraints%22%3A+%7B%22isBot%22%3A+true%7D%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabread-4480.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,209 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:31:54 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"4480\",\"phid\":\"PHID-DREV-gsa7dkuimmam7nafw7h3\",\"title\":\"exchangev2: start to implement pull with wire protocol v2\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D4480\",\"dateCreated\":\"1536164431\",\"dateModified\":\"1536981352\",\"authorPHID\":\"PHID-USER-p54bpwbifxx7sbgpx47d\",\"status\":\"3\",\"statusName\":\"Closed\",\"properties\":{\"wasAcceptedBeforeClose\":false},\"branch\":null,\"summary\":\"Wire protocol version 2 will take a substantially different\\napproach to exchange than version 1 (at least as far as pulling\\nis concerned).\\n\\nThis commit establishes a new exchangev2 module for holding\\ncode related to exchange using wire protocol v2. I could have\\nadded things to the existing exchange module. But it is already\\nquite big. And doing things inline isn't in question because\\nthe existing code is already littered with conditional code\\nfor various states of support for the existing wire protocol\\nas it evolved over 10+ years. A new module gives us a chance\\nto make a clean break.\\n\\nThis approach does mean we'll end up writing some duplicate\\ncode. And there's a significant chance we'll miss functionality\\nas code is ported. The plan is to eventually add #testcase's\\nto existing tests so the new wire protocol is tested side-by-side\\nwith the existing one. This will hopefully tease out any\\nfeatures that weren't ported properly. But before we get there,\\nwe need to build up support for the new exchange methods.\\n\\nOur journey towards implementing a new exchange begins with pulling.\\nAnd pulling begins with discovery.\\n\\nThe discovery code added to exchangev2 is heavily drawn from\\nthe following functions:\\n\\n* exchange._pulldiscoverychangegroup\\n* discovery.findcommonincoming\\n\\nFor now, we build on top of existing discovery mechanisms. The\\nnew wire protocol should be capable of doing things more efficiently.\\nBut I'd rather defer on this problem.\\n\\nTo foster the transition, we invent a fake capability on the HTTPv2\\npeer and have the main pull code in exchange.py call into exchangev2\\nwhen the new wire protocol is being used.\",\"testPlan\":\"\",\"lineCount\":\"145\",\"activeDiffPHID\":\"PHID-DIFF-kg2rt6kiekgo5rgyeu5n\",\"diffs\":[\"11058\",\"10961\",\"10793\"],\"commits\":[\"PHID-CMIT-kvz2f3rczvi6exmvtyaq\"],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-cgcdlc6c3gpxapbmkwa2\",\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-a77jfv32jtxfwxngd6bd\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "146"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B4480%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:31:55 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"11058\":{\"id\":\"11058\",\"revisionID\":\"4480\",\"dateCreated\":\"1536771503\",\"dateModified\":\"1536981352\",\"sourceControlBaseRevision\":\"a5de21c9e3703f8e8eb064bd7d893ff2f703c66a\",\"sourceControlPath\":null,\"sourceControlSystem\":\"hg\",\"branch\":null,\"bookmark\":null,\"creationMethod\":\"commit\",\"description\":\"rHGa86d21e70b2b79d5e7e1085e5e755b4b26b8676d\",\"unitStatus\":\"6\",\"lintStatus\":\"6\",\"changes\":[{\"id\":\"24371\",\"metadata\":{\"line:first\":59},\"oldPath\":\"tests\\/wireprotohelpers.sh\",\"currentPath\":\"tests\\/wireprotohelpers.sh\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"7\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"58\",\"newLength\":\"65\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" HTTPV2=exp-http-v2-0001\\n MEDIATYPE=application\\/mercurial-exp-framing-0005\\n \\n sendhttpraw() {\\n   hg --verbose debugwireproto --peer raw http:\\/\\/$LOCALIP:$HGPORT\\/\\n }\\n \\n sendhttpv2peer() {\\n   hg --verbose debugwireproto --nologhandshake --peer http2 http:\\/\\/$LOCALIP:$HGPORT\\/\\n }\\n \\n sendhttpv2peerhandshake() {\\n   hg --verbose debugwireproto --peer http2 http:\\/\\/$LOCALIP:$HGPORT\\/\\n }\\n \\n cat \\u003e dummycommands.py \\u003c\\u003c EOF\\n from mercurial import (\\n     wireprototypes,\\n     wireprotov1server,\\n     wireprotov2server,\\n )\\n \\n @wireprotov1server.wireprotocommand(b'customreadonly', permission=b'pull')\\n def customreadonlyv1(repo, proto):\\n     return wireprototypes.bytesresponse(b'customreadonly bytes response')\\n \\n @wireprotov2server.wireprotocommand(b'customreadonly', permission=b'pull')\\n def customreadonlyv2(repo, proto):\\n     yield b'customreadonly bytes response'\\n \\n @wireprotov1server.wireprotocommand(b'customreadwrite', permission=b'push')\\n def customreadwrite(repo, proto):\\n     return wireprototypes.bytesresponse(b'customreadwrite bytes response')\\n \\n @wireprotov2server.wireprotocommand(b'customreadwrite', permission=b'push')\\n def customreadwritev2(repo, proto):\\n     yield b'customreadwrite bytes response'\\n EOF\\n \\n cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n [extensions]\\n drawdag = $TESTDIR\\/drawdag.py\\n EOF\\n \\n enabledummycommands() {\\n   cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n [extensions]\\n dummycommands = $TESTTMP\\/dummycommands.py\\n EOF\\n }\\n \\n enablehttpv2() {\\n   cat \\u003e\\u003e $1\\/.hg\\/hgrc \\u003c\\u003c EOF\\n [experimental]\\n web.apiserver = true\\n web.api.http-v2 = true\\n EOF\\n }\\n+\\n+enablehttpv2client() {\\n+  cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n+[experimental]\\n+httppeer.advertise-v2 = true\\n+EOF\\n+}\\n\"}]},{\"id\":\"24370\",\"metadata\":{\"line:first\":1},\"oldPath\":null,\"currentPath\":\"tests\\/test-wireproto-exchangev2.t\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"53\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"53\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+Tests for wire protocol version 2 exchange.\\n+Tests in this file should be folded into existing tests once protocol\\n+v2 has enough features that it can be enabled 
via #testcase in existing\\n+tests.\\n+\\n+  $ . $TESTDIR\\/wireprotohelpers.sh\\n+  $ enablehttpv2client\\n+\\n+  $ hg init server-simple\\n+  $ enablehttpv2 server-simple\\n+  $ cd server-simple\\n+  $ cat \\u003e\\u003e .hg\\/hgrc \\u003c\\u003c EOF\\n+  \\u003e [phases]\\n+  \\u003e publish = false\\n+  \\u003e EOF\\n+  $ echo a0 \\u003e a\\n+  $ echo b0 \\u003e b\\n+  $ hg -q commit -A -m 'commit 0'\\n+\\n+  $ echo a1 \\u003e a\\n+  $ hg commit -m 'commit 1'\\n+  $ hg phase --public -r .\\n+  $ echo a2 \\u003e a\\n+  $ hg commit -m 'commit 2'\\n+\\n+  $ hg -q up -r 0\\n+  $ echo b1 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 1'\\n+  $ echo b2 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 2'\\n+\\n+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log\\n+  $ cat hg.pid \\u003e $DAEMON_PIDS\\n+\\n+  $ cd ..\\n+\\n+Test basic clone\\n+\\n+  $ hg --debug clone -U http:\\/\\/localhost:$HGPORT client-simple\\n+  using http:\\/\\/localhost:$HGPORT\\/\\n+  sending capabilities command\\n+  query 1; heads\\n+  sending 2 commands\\n+  sending command heads: {}\\n+  sending command known: {\\n+    'nodes': []\\n+  }\\n+  received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)\\n+  received frame(size=43; request=1; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)\\n+  received frame(size=11; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=1; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)\\n\"}]},{\"id\":\"24369\",\"metadata\":{\"line:first\":805},\"oldPath\":\"mercurial\\/httppeer.py\",\"currentPath\":\"mercurial\\/httppeer.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"2\",\"delLines\":\"1\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"1006\",\"newLength\":\"1007\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" # httppeer.py - HTTP repository proxy classes for mercurial\\n #\\n # Copyright 2005, 2006 Matt Mackall \\u003cmpm@selenic.com\\u003e\\n # Copyright 2006 Vadim Gelfer \\u003cvadim.gelfer@gmail.com\\u003e\\n #\\n # This software may be used and distributed according to the terms of the\\n # GNU General Public License version 2 or any later version.\\n \\n from __future__ import absolute_import\\n \\n import errno\\n import io\\n import os\\n import socket\\n import struct\\n import weakref\\n \\n from .i18n import _\\n from . import (\\n     bundle2,\\n     error,\\n     httpconnection,\\n     pycompat,\\n     repository,\\n     statichttprepo,\\n     url as urlmod,\\n     util,\\n     wireprotoframing,\\n     wireprototypes,\\n     wireprotov1peer,\\n     wireprotov2peer,\\n     wireprotov2server,\\n )\\n from .utils import (\\n     cborutil,\\n     interfaceutil,\\n     stringutil,\\n )\\n \\n httplib = util.httplib\\n urlerr = util.urlerr\\n urlreq = util.urlreq\\n \\n def encodevalueinheaders(value, header, limit):\\n     \\\"\\\"\\\"Encode a string value into multiple HTTP headers.\\n \\n     ``value`` will be encoded into 1 or more HTTP headers with the names\\n     ``header-\\u003cN\\u003e`` where ``\\u003cN\\u003e`` is an integer starting at 1. 
Each header\\n     name + value will be at most ``limit`` bytes long.\\n \\n     Returns an iterable of 2-tuples consisting of header names and\\n     values as native strings.\\n     \\\"\\\"\\\"\\n     # HTTP Headers are ASCII. Python 3 requires them to be unicodes,\\n     # not bytes. This function always takes bytes in as arguments.\\n     fmt = pycompat.strurl(header) + r'-%s'\\n     # Note: it is *NOT* a bug that the last bit here is a bytestring\\n     # and not a unicode: we're just getting the encoded length anyway,\\n     # and using an r-string to make it portable between Python 2 and 3\\n     # doesn't work because then the \\\\r is a literal backslash-r\\n     # instead of a carriage return.\\n     valuelen = limit - len(fmt % r'000') - len(': \\\\r\\\\n')\\n     result = []\\n \\n     n = 0\\n     for i in pycompat.xrange(0, len(value), valuelen):\\n         n += 1\\n         result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))\\n \\n     return result\\n \\n def _wraphttpresponse(resp):\\n     \\\"\\\"\\\"Wrap an HTTPResponse with common error handlers.\\n \\n     This ensures that any I\\/O from any consumer raises the appropriate\\n     error and messaging.\\n     \\\"\\\"\\\"\\n     origread = resp.read\\n \\n     class readerproxy(resp.__class__):\\n         def read(self, size=None):\\n             try:\\n                 return origread(size)\\n             except httplib.IncompleteRead as e:\\n                 # e.expected is an integer if length known or None otherwise.\\n                 if e.expected:\\n                     got = len(e.partial)\\n                     total = e.expected + got\\n                     msg = _('HTTP request error (incomplete response; '\\n                             'expected %d bytes got %d)') % (total, got)\\n                 else:\\n                     msg = _('HTTP request error (incomplete response)')\\n \\n                 raise error.PeerTransportError(\\n                     msg,\\n                     hint=_('this may be an intermittent network failure; '\\n                            'if the error persists, consider contacting the '\\n                            'network or server operator'))\\n             except httplib.HTTPException as e:\\n                 raise error.PeerTransportError(\\n                     _('HTTP request error (%s)') % e,\\n                     hint=_('this may be an intermittent network failure; '\\n                            'if the error persists, consider contacting the '\\n                            'network or server operator'))\\n \\n     resp.__class__ = readerproxy\\n \\n class _multifile(object):\\n     def __init__(self, *fileobjs):\\n         for f in fileobjs:\\n             if not util.safehasattr(f, 'length'):\\n                 raise ValueError(\\n                     '_multifile only supports file objects that '\\n                     'have a length but this one does not:', type(f), f)\\n         self._fileobjs = fileobjs\\n         self._index = 0\\n \\n     @property\\n     def length(self):\\n         return sum(f.length for f in self._fileobjs)\\n \\n     def read(self, amt=None):\\n         if amt \\u003c= 0:\\n             return ''.join(f.read() for f in self._fileobjs)\\n         parts = []\\n         while amt and self._index \\u003c len(self._fileobjs):\\n             parts.append(self._fileobjs[self._index].read(amt))\\n             got = len(parts[-1])\\n             if got \\u003c amt:\\n                 self._index += 1\\n             amt -= got\\n   
      return ''.join(parts)\\n \\n     def seek(self, offset, whence=os.SEEK_SET):\\n         if whence != os.SEEK_SET:\\n             raise NotImplementedError(\\n                 '_multifile does not support anything other'\\n                 ' than os.SEEK_SET for whence on seek()')\\n         if offset != 0:\\n             raise NotImplementedError(\\n                 '_multifile only supports seeking to start, but that '\\n                 'could be fixed if you need it')\\n         for f in self._fileobjs:\\n             f.seek(0)\\n         self._index = 0\\n \\n def makev1commandrequest(ui, requestbuilder, caps, capablefn,\\n                          repobaseurl, cmd, args):\\n     \\\"\\\"\\\"Make an HTTP request to run a command for a version 1 client.\\n \\n     ``caps`` is a set of known server capabilities. The value may be\\n     None if capabilities are not yet known.\\n \\n     ``capablefn`` is a function to evaluate a capability.\\n \\n     ``cmd``, ``args``, and ``data`` define the command, its arguments, and\\n     raw data to pass to it.\\n     \\\"\\\"\\\"\\n     if cmd == 'pushkey':\\n         args['data'] = ''\\n     data = args.pop('data', None)\\n     headers = args.pop('headers', {})\\n \\n     ui.debug(\\\"sending %s command\\\\n\\\" % cmd)\\n     q = [('cmd', cmd)]\\n     headersize = 0\\n     # Important: don't use self.capable() here or else you end up\\n     # with infinite recursion when trying to look up capabilities\\n     # for the first time.\\n     postargsok = caps is not None and 'httppostargs' in caps\\n \\n     # Send arguments via POST.\\n     if postargsok and args:\\n         strargs = urlreq.urlencode(sorted(args.items()))\\n         if not data:\\n             data = strargs\\n         else:\\n             if isinstance(data, bytes):\\n                 i = io.BytesIO(data)\\n                 i.length = len(data)\\n                 data = i\\n             argsio = io.BytesIO(strargs)\\n             argsio.length = len(strargs)\\n             data = _multifile(argsio, data)\\n         headers[r'X-HgArgs-Post'] = len(strargs)\\n     elif args:\\n         # Calling self.capable() can infinite loop if we are calling\\n         # \\\"capabilities\\\". But that command should never accept wire\\n         # protocol arguments. 
So this should never happen.\\n         assert cmd != 'capabilities'\\n         httpheader = capablefn('httpheader')\\n         if httpheader:\\n             headersize = int(httpheader.split(',', 1)[0])\\n \\n         # Send arguments via HTTP headers.\\n         if headersize \\u003e 0:\\n             # The headers can typically carry more data than the URL.\\n             encargs = urlreq.urlencode(sorted(args.items()))\\n             for header, value in encodevalueinheaders(encargs, 'X-HgArg',\\n                                                       headersize):\\n                 headers[header] = value\\n         # Send arguments via query string (Mercurial \\u003c1.9).\\n         else:\\n             q += sorted(args.items())\\n \\n     qs = '?%s' % urlreq.urlencode(q)\\n     cu = \\\"%s%s\\\" % (repobaseurl, qs)\\n     size = 0\\n     if util.safehasattr(data, 'length'):\\n         size = data.length\\n     elif data is not None:\\n         size = len(data)\\n     if data is not None and r'Content-Type' not in headers:\\n         headers[r'Content-Type'] = r'application\\/mercurial-0.1'\\n \\n     # Tell the server we accept application\\/mercurial-0.2 and multiple\\n     # compression formats if the server is capable of emitting those\\n     # payloads.\\n     # Note: Keep this set empty by default, as client advertisement of\\n     # protocol parameters should only occur after the handshake.\\n     protoparams = set()\\n \\n     mediatypes = set()\\n     if caps is not None:\\n         mt = capablefn('httpmediatype')\\n         if mt:\\n             protoparams.add('0.1')\\n             mediatypes = set(mt.split(','))\\n \\n         protoparams.add('partial-pull')\\n \\n     if '0.2tx' in mediatypes:\\n         protoparams.add('0.2')\\n \\n     if '0.2tx' in mediatypes and capablefn('compression'):\\n         # We \\/could\\/ compare supported compression formats and prune\\n         # non-mutually supported or error if nothing is mutually supported.\\n         # For now, send the full list to the server and have it error.\\n         comps = [e.wireprotosupport().name for e in\\n                  util.compengines.supportedwireengines(util.CLIENTROLE)]\\n         protoparams.add('comp=%s' % ','.join(comps))\\n \\n     if protoparams:\\n         protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),\\n                                             'X-HgProto',\\n                                             headersize or 1024)\\n         for header, value in protoheaders:\\n             headers[header] = value\\n \\n     varyheaders = []\\n     for header in headers:\\n         if header.lower().startswith(r'x-hg'):\\n             varyheaders.append(header)\\n \\n     if varyheaders:\\n         headers[r'Vary'] = r','.join(sorted(varyheaders))\\n \\n     req = requestbuilder(pycompat.strurl(cu), data, headers)\\n \\n     if data is not None:\\n         ui.debug(\\\"sending %d bytes\\\\n\\\" % size)\\n         req.add_unredirected_header(r'Content-Length', r'%d' % size)\\n \\n     return req, cu, qs\\n \\n def _reqdata(req):\\n     \\\"\\\"\\\"Get request data, if any. 
If no data, returns None.\\\"\\\"\\\"\\n     if pycompat.ispy3:\\n         return req.data\\n     if not req.has_data():\\n         return None\\n     return req.get_data()\\n \\n def sendrequest(ui, opener, req):\\n     \\\"\\\"\\\"Send a prepared HTTP request.\\n \\n     Returns the response object.\\n     \\\"\\\"\\\"\\n     dbg = ui.debug\\n     if (ui.debugflag\\n         and ui.configbool('devel', 'debug.peer-request')):\\n         line = 'devel-peer-request: %s\\\\n'\\n         dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()),\\n                               pycompat.bytesurl(req.get_full_url())))\\n         hgargssize = None\\n \\n         for header, value in sorted(req.header_items()):\\n             header = pycompat.bytesurl(header)\\n             value = pycompat.bytesurl(value)\\n             if header.startswith('X-hgarg-'):\\n                 if hgargssize is None:\\n                     hgargssize = 0\\n                 hgargssize += len(value)\\n             else:\\n                 dbg(line % '  %s %s' % (header, value))\\n \\n         if hgargssize is not None:\\n             dbg(line % '  %d bytes of commands arguments in headers'\\n                 % hgargssize)\\n         data = _reqdata(req)\\n         if data is not None:\\n             length = getattr(data, 'length', None)\\n             if length is None:\\n                 length = len(data)\\n             dbg(line % '  %d bytes of data' % length)\\n \\n         start = util.timer()\\n \\n     res = None\\n     try:\\n         res = opener.open(req)\\n     except urlerr.httperror as inst:\\n         if inst.code == 401:\\n             raise error.Abort(_('authorization failed'))\\n         raise\\n     except httplib.HTTPException as inst:\\n         ui.debug('http error requesting %s\\\\n' %\\n                  util.hidepassword(req.get_full_url()))\\n         ui.traceback()\\n         raise IOError(None, inst)\\n     finally:\\n         if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):\\n             code = res.code if res else -1\\n             dbg(line % '  finished in %.4f seconds (%d)'\\n                 % (util.timer() - start, code))\\n \\n     # Insert error handlers for common I\\/O failures.\\n     _wraphttpresponse(res)\\n \\n     return res\\n \\n class RedirectedRepoError(error.RepoError):\\n     def __init__(self, msg, respurl):\\n         super(RedirectedRepoError, self).__init__(msg)\\n         self.respurl = respurl\\n \\n def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,\\n                            allowcbor=False):\\n     # record the url we got redirected to\\n     redirected = False\\n     respurl = pycompat.bytesurl(resp.geturl())\\n     if respurl.endswith(qs):\\n         respurl = respurl[:-len(qs)]\\n         qsdropped = False\\n     else:\\n         qsdropped = True\\n \\n     if baseurl.rstrip('\\/') != respurl.rstrip('\\/'):\\n         redirected = True\\n         if not ui.quiet:\\n             ui.warn(_('real URL is %s\\\\n') % respurl)\\n \\n     try:\\n         proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))\\n     except AttributeError:\\n         proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))\\n \\n     safeurl = util.hidepassword(baseurl)\\n     if proto.startswith('application\\/hg-error'):\\n         raise error.OutOfBandError(resp.read())\\n \\n     # Pre 1.0 versions of Mercurial used text\\/plain and\\n     # application\\/hg-changegroup. 
We don't support such old servers.\\n     if not proto.startswith('application\\/mercurial-'):\\n         ui.debug(\\\"requested URL: '%s'\\\\n\\\" % util.hidepassword(requrl))\\n         msg = _(\\\"'%s' does not appear to be an hg repository:\\\\n\\\"\\n                 \\\"---%%\\u003c--- (%s)\\\\n%s\\\\n---%%\\u003c---\\\\n\\\") % (\\n             safeurl, proto or 'no content-type', resp.read(1024))\\n \\n         # Some servers may strip the query string from the redirect. We\\n         # raise a special error type so callers can react to this specially.\\n         if redirected and qsdropped:\\n             raise RedirectedRepoError(msg, respurl)\\n         else:\\n             raise error.RepoError(msg)\\n \\n     try:\\n         subtype = proto.split('-', 1)[1]\\n \\n         # Unless we end up supporting CBOR in the legacy wire protocol,\\n         # this should ONLY be encountered for the initial capabilities\\n         # request during handshake.\\n         if subtype == 'cbor':\\n             if allowcbor:\\n                 return respurl, proto, resp\\n             else:\\n                 raise error.RepoError(_('unexpected CBOR response from '\\n                                         'server'))\\n \\n         version_info = tuple([int(n) for n in subtype.split('.')])\\n     except ValueError:\\n         raise error.RepoError(_(\\\"'%s' sent a broken Content-Type \\\"\\n                                 \\\"header (%s)\\\") % (safeurl, proto))\\n \\n     # TODO consider switching to a decompression reader that uses\\n     # generators.\\n     if version_info == (0, 1):\\n         if compressible:\\n             resp = util.compengines['zlib'].decompressorreader(resp)\\n \\n     elif version_info == (0, 2):\\n         # application\\/mercurial-0.2 always identifies the compression\\n         # engine in the payload header.\\n         elen = struct.unpack('B', util.readexactly(resp, 1))[0]\\n         ename = util.readexactly(resp, elen)\\n         engine = util.compengines.forwiretype(ename)\\n \\n         resp = engine.decompressorreader(resp)\\n     else:\\n         raise error.RepoError(_(\\\"'%s' uses newer protocol %s\\\") %\\n                               (safeurl, subtype))\\n \\n     return respurl, proto, resp\\n \\n class httppeer(wireprotov1peer.wirepeer):\\n     def __init__(self, ui, path, url, opener, requestbuilder, caps):\\n         self.ui = ui\\n         self._path = path\\n         self._url = url\\n         self._caps = caps\\n         self._urlopener = opener\\n         self._requestbuilder = requestbuilder\\n \\n     def __del__(self):\\n         for h in self._urlopener.handlers:\\n             h.close()\\n             getattr(h, \\\"close_all\\\", lambda: None)()\\n \\n     # Begin of ipeerconnection interface.\\n \\n     def url(self):\\n         return self._path\\n \\n     def local(self):\\n         return None\\n \\n     def peer(self):\\n         return self\\n \\n     def canpush(self):\\n         return True\\n \\n     def close(self):\\n         pass\\n \\n     # End of ipeerconnection interface.\\n \\n     # Begin of ipeercommands interface.\\n \\n     def capabilities(self):\\n         return self._caps\\n \\n     # End of ipeercommands interface.\\n \\n     def _callstream(self, cmd, _compressible=False, **args):\\n         args = pycompat.byteskwargs(args)\\n \\n         req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,\\n                                            self._caps, self.capable,\\n                              
              self._url, cmd, args)\\n \\n         resp = sendrequest(self.ui, self._urlopener, req)\\n \\n         self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,\\n                                                      resp, _compressible)\\n \\n         return resp\\n \\n     def _call(self, cmd, **args):\\n         fp = self._callstream(cmd, **args)\\n         try:\\n             return fp.read()\\n         finally:\\n             # if using keepalive, allow connection to be reused\\n             fp.close()\\n \\n     def _callpush(self, cmd, cg, **args):\\n         # have to stream bundle to a temp file because we do not have\\n         # http 1.1 chunked transfer.\\n \\n         types = self.capable('unbundle')\\n         try:\\n             types = types.split(',')\\n         except AttributeError:\\n             # servers older than d1b16a746db6 will send 'unbundle' as a\\n             # boolean capability. They only support headerless\\/uncompressed\\n             # bundles.\\n             types = [\\\"\\\"]\\n         for x in types:\\n             if x in bundle2.bundletypes:\\n                 type = x\\n                 break\\n \\n         tempname = bundle2.writebundle(self.ui, cg, None, type)\\n         fp = httpconnection.httpsendfile(self.ui, tempname, \\\"rb\\\")\\n         headers = {r'Content-Type': r'application\\/mercurial-0.1'}\\n \\n         try:\\n             r = self._call(cmd, data=fp, headers=headers, **args)\\n             vals = r.split('\\\\n', 1)\\n             if len(vals) \\u003c 2:\\n                 raise error.ResponseError(_(\\\"unexpected response:\\\"), r)\\n             return vals\\n         except urlerr.httperror:\\n             # Catch and re-raise these so we don't try and treat them\\n             # like generic socket errors. 
They lack any values in\\n             # .args on Python 3 which breaks our socket.error block.\\n             raise\\n         except socket.error as err:\\n             if err.args[0] in (errno.ECONNRESET, errno.EPIPE):\\n                 raise error.Abort(_('push failed: %s') % err.args[1])\\n             raise error.Abort(err.args[1])\\n         finally:\\n             fp.close()\\n             os.unlink(tempname)\\n \\n     def _calltwowaystream(self, cmd, fp, **args):\\n         fh = None\\n         fp_ = None\\n         filename = None\\n         try:\\n             # dump bundle to disk\\n             fd, filename = pycompat.mkstemp(prefix=\\\"hg-bundle-\\\", suffix=\\\".hg\\\")\\n             fh = os.fdopen(fd, r\\\"wb\\\")\\n             d = fp.read(4096)\\n             while d:\\n                 fh.write(d)\\n                 d = fp.read(4096)\\n             fh.close()\\n             # start http push\\n             fp_ = httpconnection.httpsendfile(self.ui, filename, \\\"rb\\\")\\n             headers = {r'Content-Type': r'application\\/mercurial-0.1'}\\n             return self._callstream(cmd, data=fp_, headers=headers, **args)\\n         finally:\\n             if fp_ is not None:\\n                 fp_.close()\\n             if fh is not None:\\n                 fh.close()\\n                 os.unlink(filename)\\n \\n     def _callcompressable(self, cmd, **args):\\n         return self._callstream(cmd, _compressible=True, **args)\\n \\n     def _abort(self, exception):\\n         raise exception\\n \\n def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):\\n     reactor = wireprotoframing.clientreactor(hasmultiplesend=False,\\n                                              buffersends=True)\\n \\n     handler = wireprotov2peer.clienthandler(ui, reactor)\\n \\n     url = '%s\\/%s' % (apiurl, permission)\\n \\n     if len(requests) \\u003e 1:\\n         url += '\\/multirequest'\\n     else:\\n         url += '\\/%s' % requests[0][0]\\n \\n     ui.debug('sending %d commands\\\\n' % len(requests))\\n     for command, args, f in requests:\\n         ui.debug('sending command %s: %s\\\\n' % (\\n             command, stringutil.pprint(args, indent=2)))\\n         assert not list(handler.callcommand(command, args, f))\\n \\n     # TODO stream this.\\n     body = b''.join(map(bytes, handler.flushcommands()))\\n \\n     # TODO modify user-agent to reflect v2\\n     headers = {\\n         r'Accept': wireprotov2server.FRAMINGTYPE,\\n         r'Content-Type': wireprotov2server.FRAMINGTYPE,\\n     }\\n \\n     req = requestbuilder(pycompat.strurl(url), body, headers)\\n     req.add_unredirected_header(r'Content-Length', r'%d' % len(body))\\n \\n     try:\\n         res = opener.open(req)\\n     except urlerr.httperror as e:\\n         if e.code == 401:\\n             raise error.Abort(_('authorization failed'))\\n \\n         raise\\n     except httplib.HTTPException as e:\\n         ui.traceback()\\n         raise IOError(None, e)\\n \\n     return handler, res\\n \\n class queuedcommandfuture(pycompat.futures.Future):\\n     \\\"\\\"\\\"Wraps result() on command futures to trigger submission on call.\\\"\\\"\\\"\\n \\n     def result(self, timeout=None):\\n         if self.done():\\n             return pycompat.futures.Future.result(self, timeout)\\n \\n         self._peerexecutor.sendcommands()\\n \\n         # sendcommands() will restore the original __class__ and self.result\\n         # will resolve to Future.result.\\n         return self.result(timeout)\\n 
\\n @interfaceutil.implementer(repository.ipeercommandexecutor)\\n class httpv2executor(object):\\n     def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):\\n         self._ui = ui\\n         self._opener = opener\\n         self._requestbuilder = requestbuilder\\n         self._apiurl = apiurl\\n         self._descriptor = descriptor\\n         self._sent = False\\n         self._closed = False\\n         self._neededpermissions = set()\\n         self._calls = []\\n         self._futures = weakref.WeakSet()\\n         self._responseexecutor = None\\n         self._responsef = None\\n \\n     def __enter__(self):\\n         return self\\n \\n     def __exit__(self, exctype, excvalue, exctb):\\n         self.close()\\n \\n     def callcommand(self, command, args):\\n         if self._sent:\\n             raise error.ProgrammingError('callcommand() cannot be used after '\\n                                          'commands are sent')\\n \\n         if self._closed:\\n             raise error.ProgrammingError('callcommand() cannot be used after '\\n                                          'close()')\\n \\n         # The service advertises which commands are available. So if we attempt\\n         # to call an unknown command or pass an unknown argument, we can screen\\n         # for this.\\n         if command not in self._descriptor['commands']:\\n             raise error.ProgrammingError(\\n                 'wire protocol command %s is not available' % command)\\n \\n         cmdinfo = self._descriptor['commands'][command]\\n         unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))\\n \\n         if unknownargs:\\n             raise error.ProgrammingError(\\n                 'wire protocol command %s does not accept argument: %s' % (\\n                     command, ', '.join(sorted(unknownargs))))\\n \\n         self._neededpermissions |= set(cmdinfo['permissions'])\\n \\n         # TODO we \\/could\\/ also validate types here, since the API descriptor\\n         # includes types...\\n \\n         f = pycompat.futures.Future()\\n \\n         # Monkeypatch it so result() triggers sendcommands(), otherwise result()\\n         # could deadlock.\\n         f.__class__ = queuedcommandfuture\\n         f._peerexecutor = self\\n \\n         self._futures.add(f)\\n         self._calls.append((command, args, f))\\n \\n         return f\\n \\n     def sendcommands(self):\\n         if self._sent:\\n             return\\n \\n         if not self._calls:\\n             return\\n \\n         self._sent = True\\n \\n         # Unhack any future types so caller sees a clean type and so we\\n         # break reference cycle.\\n         for f in self._futures:\\n             if isinstance(f, queuedcommandfuture):\\n                 f.__class__ = pycompat.futures.Future\\n                 f._peerexecutor = None\\n \\n         # Mark the future as running and filter out cancelled futures.\\n         calls = [(command, args, f)\\n                  for command, args, f in self._calls\\n                  if f.set_running_or_notify_cancel()]\\n \\n         # Clear out references, prevent improper object usage.\\n         self._calls = None\\n \\n         if not calls:\\n             return\\n \\n         permissions = set(self._neededpermissions)\\n \\n         if 'push' in permissions and 'pull' in permissions:\\n             permissions.remove('pull')\\n \\n         if len(permissions) \\u003e 1:\\n             raise error.RepoError(_('cannot make request requiring multiple '\\n  
                                   'permissions: %s') %\\n                                   _(', ').join(sorted(permissions)))\\n \\n         permission = {\\n             'push': 'rw',\\n             'pull': 'ro',\\n         }[permissions.pop()]\\n \\n         handler, resp = sendv2request(\\n             self._ui, self._opener, self._requestbuilder, self._apiurl,\\n             permission, calls)\\n \\n         # TODO we probably want to validate the HTTP code, media type, etc.\\n \\n         self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)\\n         self._responsef = self._responseexecutor.submit(self._handleresponse,\\n                                                         handler, resp)\\n \\n     def close(self):\\n         if self._closed:\\n             return\\n \\n         self.sendcommands()\\n \\n         self._closed = True\\n \\n         if not self._responsef:\\n             return\\n \\n         # TODO ^C here may not result in immediate program termination.\\n \\n         try:\\n             self._responsef.result()\\n         finally:\\n             self._responseexecutor.shutdown(wait=True)\\n             self._responsef = None\\n             self._responseexecutor = None\\n \\n             # If any of our futures are still in progress, mark them as\\n             # errored, otherwise a result() could wait indefinitely.\\n             for f in self._futures:\\n                 if not f.done():\\n                     f.set_exception(error.ResponseError(\\n                         _('unfulfilled command response')))\\n \\n             self._futures = None\\n \\n     def _handleresponse(self, handler, resp):\\n         # Called in a thread to read the response.\\n \\n         while handler.readframe(resp):\\n             pass\\n \\n # TODO implement interface for version 2 peers\\n @interfaceutil.implementer(repository.ipeerconnection,\\n                            repository.ipeercapabilities,\\n                            repository.ipeerrequests)\\n class httpv2peer(object):\\n     def __init__(self, ui, repourl, apipath, opener, requestbuilder,\\n                  apidescriptor):\\n         self.ui = ui\\n \\n         if repourl.endswith('\\/'):\\n             repourl = repourl[:-1]\\n \\n         self._url = repourl\\n         self._apipath = apipath\\n         self._apiurl = '%s\\/%s' % (repourl, apipath)\\n         self._opener = opener\\n         self._requestbuilder = requestbuilder\\n         self._descriptor = apidescriptor\\n \\n     # Start of ipeerconnection.\\n \\n     def url(self):\\n         return self._url\\n \\n     def local(self):\\n         return None\\n \\n     def peer(self):\\n         return self\\n \\n     def canpush(self):\\n         # TODO change once implemented.\\n         return False\\n \\n     def close(self):\\n         pass\\n \\n     # End of ipeerconnection.\\n \\n     # Start of ipeercapabilities.\\n \\n     def capable(self, name):\\n         # The capabilities used internally historically map to capabilities\\n         # advertised from the \\\"capabilities\\\" wire protocol command. 
However,\\n         # version 2 of that command works differently.\\n \\n         # Maps to commands that are available.\\n         if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):\\n             return True\\n \\n         # Other concepts.\\n-        if name in ('bundle2',):\\n+        # TODO remove exchangev2 once we have a command implemented.\\n+        if name in ('bundle2', 'exchangev2'):\\n             return True\\n \\n         # Alias command-* to presence of command of that name.\\n         if name.startswith('command-'):\\n             return name[len('command-'):] in self._descriptor['commands']\\n \\n         return False\\n \\n     def requirecap(self, name, purpose):\\n         if self.capable(name):\\n             return\\n \\n         raise error.CapabilityError(\\n             _('cannot %s; client or remote repository does not support the %r '\\n               'capability') % (purpose, name))\\n \\n     # End of ipeercapabilities.\\n \\n     def _call(self, name, **args):\\n         with self.commandexecutor() as e:\\n             return e.callcommand(name, args).result()\\n \\n     def commandexecutor(self):\\n         return httpv2executor(self.ui, self._opener, self._requestbuilder,\\n                               self._apiurl, self._descriptor)\\n \\n # Registry of API service names to metadata about peers that handle it.\\n #\\n # The following keys are meaningful:\\n #\\n # init\\n #    Callable receiving (ui, repourl, servicepath, opener, requestbuilder,\\n #                        apidescriptor) to create a peer.\\n #\\n # priority\\n #    Integer priority for the service. If we could choose from multiple\\n #    services, we choose the one with the highest priority.\\n API_PEERS = {\\n     wireprototypes.HTTP_WIREPROTO_V2: {\\n         'init': httpv2peer,\\n         'priority': 50,\\n     },\\n }\\n \\n def performhandshake(ui, url, opener, requestbuilder):\\n     # The handshake is a request to the capabilities command.\\n \\n     caps = None\\n     def capable(x):\\n         raise error.ProgrammingError('should not be called')\\n \\n     args = {}\\n \\n     # The client advertises support for newer protocols by adding an\\n     # X-HgUpgrade-* header with a list of supported APIs and an\\n     # X-HgProto-* header advertising which serializing formats it supports.\\n     # We only support the HTTP version 2 transport and CBOR responses for\\n     # now.\\n     advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')\\n \\n     if advertisev2:\\n         args['headers'] = {\\n             r'X-HgProto-1': r'cbor',\\n         }\\n \\n         args['headers'].update(\\n             encodevalueinheaders(' '.join(sorted(API_PEERS)),\\n                                  'X-HgUpgrade',\\n                                  # We don't know the header limit this early.\\n                                  # So make it small.\\n                                  1024))\\n \\n     req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,\\n                                            capable, url, 'capabilities',\\n                                            args)\\n     resp = sendrequest(ui, opener, req)\\n \\n     # The server may redirect us to the repo root, stripping the\\n     # ?cmd=capabilities query string from the URL. 
The server would likely\\n     # return HTML in this case and ``parsev1commandresponse()`` would raise.\\n     # We catch this special case and re-issue the capabilities request against\\n     # the new URL.\\n     #\\n     # We should ideally not do this, as a redirect that drops the query\\n     # string from the URL is arguably a server bug. (Garbage in, garbage out).\\n     # However,  Mercurial clients for several years appeared to handle this\\n     # issue without behavior degradation. And according to issue 5860, it may\\n     # be a longstanding bug in some server implementations. So we allow a\\n     # redirect that drops the query string to \\\"just work.\\\"\\n     try:\\n         respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,\\n                                                    compressible=False,\\n                                                    allowcbor=advertisev2)\\n     except RedirectedRepoError as e:\\n         req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,\\n                                                capable, e.respurl,\\n                                                'capabilities', args)\\n         resp = sendrequest(ui, opener, req)\\n         respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,\\n                                                    compressible=False,\\n                                                    allowcbor=advertisev2)\\n \\n     try:\\n         rawdata = resp.read()\\n     finally:\\n         resp.close()\\n \\n     if not ct.startswith('application\\/mercurial-'):\\n         raise error.ProgrammingError('unexpected content-type: %s' % ct)\\n \\n     if advertisev2:\\n         if ct == 'application\\/mercurial-cbor':\\n             try:\\n                 info = cborutil.decodeall(rawdata)[0]\\n             except cborutil.CBORDecodeError:\\n                 raise error.Abort(_('error decoding CBOR from remote server'),\\n                                   hint=_('try again and consider contacting '\\n                                          'the server operator'))\\n \\n         # We got a legacy response. 
That's fine.\\n         elif ct in ('application\\/mercurial-0.1', 'application\\/mercurial-0.2'):\\n             info = {\\n                 'v1capabilities': set(rawdata.split())\\n             }\\n \\n         else:\\n             raise error.RepoError(\\n                 _('unexpected response type from server: %s') % ct)\\n     else:\\n         info = {\\n             'v1capabilities': set(rawdata.split())\\n         }\\n \\n     return respurl, info\\n \\n def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):\\n     \\\"\\\"\\\"Construct an appropriate HTTP peer instance.\\n \\n     ``opener`` is an ``url.opener`` that should be used to establish\\n     connections, perform HTTP requests.\\n \\n     ``requestbuilder`` is the type used for constructing HTTP requests.\\n     It exists as an argument so extensions can override the default.\\n     \\\"\\\"\\\"\\n     u = util.url(path)\\n     if u.query or u.fragment:\\n         raise error.Abort(_('unsupported URL component: \\\"%s\\\"') %\\n                           (u.query or u.fragment))\\n \\n     # urllib cannot handle URLs with embedded user or passwd.\\n     url, authinfo = u.authinfo()\\n     ui.debug('using %s\\\\n' % url)\\n \\n     opener = opener or urlmod.opener(ui, authinfo)\\n \\n     respurl, info = performhandshake(ui, url, opener, requestbuilder)\\n \\n     # Given the intersection of APIs that both we and the server support,\\n     # sort by their advertised priority and pick the first one.\\n     #\\n     # TODO consider making this request-based and interface driven. For\\n     # example, the caller could say \\\"I want a peer that does X.\\\" It's quite\\n     # possible that not all peers would do that. Since we know the service\\n     # capabilities, we could filter out services not meeting the\\n     # requirements. Possibly by consulting the interfaces defined by the\\n     # peer type.\\n     apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())\\n \\n     preferredchoices = sorted(apipeerchoices,\\n                               key=lambda x: API_PEERS[x]['priority'],\\n                               reverse=True)\\n \\n     for service in preferredchoices:\\n         apipath = '%s\\/%s' % (info['apibase'].rstrip('\\/'), service)\\n \\n         return API_PEERS[service]['init'](ui, respurl, apipath, opener,\\n                                           requestbuilder,\\n                                           info['apis'][service])\\n \\n     # Failed to construct an API peer. 
Fall back to legacy.\\n     return httppeer(ui, path, respurl, opener, requestbuilder,\\n                     info['v1capabilities'])\\n \\n def instance(ui, path, create, intents=None, createopts=None):\\n     if create:\\n         raise error.Abort(_('cannot create new http repository'))\\n     try:\\n         if path.startswith('https:') and not urlmod.has_https:\\n             raise error.Abort(_('Python support for SSL and HTTPS '\\n                                 'is not installed'))\\n \\n         inst = makepeer(ui, path)\\n \\n         return inst\\n     except error.RepoError as httpexception:\\n         try:\\n             r = statichttprepo.instance(ui, \\\"static-\\\" + path, create)\\n             ui.note(_('(falling back to static-http)\\\\n'))\\n             return r\\n         except error.RepoError:\\n             raise httpexception # use the original http RepoError instead\\n\"}]},{\"id\":\"24368\",\"metadata\":{\"line:first\":1,\"copy:lines\":{\"4\":[\"mercurial\\/exchange.py\",4,\" \"],\"5\":[\"mercurial\\/exchange.py\",5,\" \"],\"6\":[\"mercurial\\/exchange.py\",6,\" \"],\"7\":[\"mercurial\\/exchange.py\",7,\" \"],\"8\":[\"mercurial\\/exchange.py\",8,\" \"],\"9\":[\"mercurial\\/exchange.py\",9,\" \"]}},\"oldPath\":null,\"currentPath\":\"mercurial\\/exchangev2.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"55\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"55\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+# exchangev2.py - repository exchange for wire protocol version 2\\n+#\\n+# Copyright 2018 Gregory Szorc \\u003cgregory.szorc@gmail.com\\u003e\\n+#\\n+# This software may be used and distributed according to the terms of the\\n+# GNU General Public License version 2 or any later version.\\n+\\n+from __future__ import absolute_import\\n+\\n+from .node import (\\n+    nullid,\\n+)\\n+from . import (\\n+    setdiscovery,\\n+)\\n+\\n+def pull(pullop):\\n+    \\\"\\\"\\\"Pull using wire protocol version 2.\\\"\\\"\\\"\\n+    repo = pullop.repo\\n+    remote = pullop.remote\\n+\\n+    # Figure out what needs to be fetched.\\n+    common, fetch, remoteheads = _pullchangesetdiscovery(\\n+        repo, remote, pullop.heads, abortwhenunrelated=pullop.force)\\n+\\n+def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):\\n+    \\\"\\\"\\\"Determine which changesets need to be pulled.\\\"\\\"\\\"\\n+\\n+    if heads:\\n+        knownnode = repo.changelog.hasnode\\n+        if all(knownnode(head) for head in heads):\\n+            return heads, False, heads\\n+\\n+    # TODO wire protocol version 2 is capable of more efficient discovery\\n+    # than setdiscovery. 
Consider implementing something better.\\n+    common, fetch, remoteheads = setdiscovery.findcommonheads(\\n+        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated)\\n+\\n+    common = set(common)\\n+    remoteheads = set(remoteheads)\\n+\\n+    # If a remote head is filtered locally, put it back in the common set.\\n+    # See the comment in exchange._pulldiscoverychangegroup() for more.\\n+\\n+    if fetch and remoteheads:\\n+        nodemap = repo.unfiltered().changelog.nodemap\\n+\\n+        common |= {head for head in remoteheads if head in nodemap}\\n+\\n+        if set(remoteheads).issubset(common):\\n+            fetch = []\\n+\\n+    common.discard(nullid)\\n+\\n+    return common, fetch, remoteheads\\n\"}]},{\"id\":\"24367\",\"metadata\":{\"line:first\":29,\"copy:lines\":{\"1514\":[\"\",1509,\"-\"],\"1515\":[\"\",1510,\"-\"],\"1516\":[\"\",1511,\"-\"],\"1517\":[\"\",1512,\"-\"],\"1518\":[\"\",1513,\"-\"],\"1519\":[\"\",1514,\"-\"],\"1520\":[\"\",1515,\"-\"],\"1521\":[\"\",1516,\"-\"],\"1522\":[\"\",1517,\"-\"],\"1523\":[\"\",1518,\"-\"],\"1524\":[\"\",1519,\"-\"],\"1525\":[\"\",1520,\" \"],\"1526\":[\"\",1521,\" \"],\"1527\":[\"\",1522,\" \"],\"1528\":[\"\",1523,\" \"],\"1529\":[\"\",1524,\" \"],\"1530\":[\"\",1525,\" \"],\"1531\":[\"\",1526,\" \"],\"1532\":[\"\",1527,\" \"],\"1533\":[\"\",1528,\" \"],\"1534\":[\"\",1529,\" \"],\"1535\":[\"\",1530,\" \"],\"1536\":[\"\",1531,\" \"],\"1537\":[\"\",1532,\" \"],\"1538\":[\"\",1533,\" \"],\"1539\":[\"\",1534,\" \"],\"1540\":[\"\",1535,\" \"],\"1541\":[\"\",1536,\" \"],\"1542\":[\"\",1537,\" \"],\"1543\":[\"\",1538,\" \"],\"1544\":[\"\",1539,\" \"],\"1545\":[\"\",1540,\" \"],\"1546\":[\"\",1541,\" \"],\"1547\":[\"\",1542,\" \"],\"1548\":[\"\",1543,\" \"],\"1549\":[\"\",1544,\" \"],\"1550\":[\"\",1545,\" \"],\"1551\":[\"\",1546,\" \"],\"1552\":[\"\",1547,\" \"],\"1553\":[\"\",1548,\" \"],\"1554\":[\"\",1549,\" \"],\"1555\":[\"\",1550,\" \"],\"1556\":[\"\",1551,\" \"],\"1557\":[\"\",1552,\" \"],\"1558\":[\"\",1553,\" \"],\"1559\":[\"\",1554,\" \"],\"1560\":[\"\",1555,\" \"],\"1561\":[\"\",1556,\" \"],\"1562\":[\"\",1557,\" \"],\"1563\":[\"\",1558,\" \"],\"1564\":[\"\",1559,\" \"],\"1565\":[\"\",1560,\" \"],\"1566\":[\"\",1561,\" \"],\"1567\":[\"\",1562,\" \"],\"1568\":[\"\",1563,\" \"],\"1569\":[\"\",1564,\" \"],\"1570\":[\"\",1565,\" \"],\"1571\":[\"\",1566,\" \"],\"1572\":[\"\",1567,\" \"],\"1573\":[\"\",1568,\" \"],\"1574\":[\"\",1569,\" \"],\"1575\":[\"\",1570,\" \"],\"1576\":[\"\",1571,\" \"],\"1577\":[\"\",1572,\" \"],\"1578\":[\"\",1573,\" \"],\"1579\":[\"\",1574,\" \"],\"1580\":[\"\",1575,\" \"],\"1581\":[\"\",1576,\" \"],\"1582\":[\"\",1577,\" \"],\"1583\":[\"\",1578,\" \"],\"1584\":[\"\",1579,\" \"],\"1585\":[\"\",1580,\" \"],\"1586\":[\"\",1581,\" \"],\"1587\":[\"\",1582,\" \"],\"1588\":[\"\",1583,\" \"],\"1589\":[\"\",1584,\" \"],\"1590\":[\"\",1585,\" \"],\"1591\":[\"\",1586,\" \"],\"1592\":[\"\",1587,\" \"],\"1593\":[\"\",1588,\" \"],\"1594\":[\"\",1589,\" \"],\"1595\":[\"\",1590,\" \"],\"1596\":[\"\",1591,\" \"],\"1597\":[\"\",1592,\" \"],\"1598\":[\"\",1593,\" \"],\"1599\":[\"\",1594,\" \"],\"1600\":[\"\",1595,\" \"],\"1601\":[\"\",1596,\" \"],\"1602\":[\"\",1597,\" \"],\"1603\":[\"\",1598,\" \"],\"1604\":[\"\",1599,\" \"],\"1605\":[\"\",1600,\" \"],\"1606\":[\"\",1601,\" \"],\"1607\":[\"\",1602,\" \"],\"1608\":[\"\",1603,\" \"],\"1609\":[\"\",1604,\" \"],\"1610\":[\"\",1605,\" \"],\"1611\":[\"\",1606,\" \"],\"1612\":[\"\",1607,\" \"],\"1613\":[\"\",1608,\" \"],\"1614\":[\"\",1609,\" 
\"],\"1615\":[\"\",1610,\" \"],\"1616\":[\"\",1611,\" \"],\"1617\":[\"\",1612,\" \"],\"1618\":[\"\",1613,\" \"],\"1619\":[\"\",1614,\" \"],\"1620\":[\"\",1615,\" \"],\"1621\":[\"\",1616,\" \"],\"1622\":[\"\",1617,\" \"],\"1623\":[\"\",1618,\" \"],\"1624\":[\"\",1619,\" \"],\"1625\":[\"\",1620,\" \"],\"1626\":[\"\",1621,\" \"],\"1627\":[\"\",1622,\" \"],\"1628\":[\"\",1623,\" \"],\"1629\":[\"\",1624,\" \"],\"1630\":[\"\",1625,\" \"],\"1631\":[\"\",1626,\" \"],\"1632\":[\"\",1627,\" \"],\"1633\":[\"\",1628,\" \"],\"1634\":[\"\",1629,\" \"],\"1635\":[\"\",1630,\" \"],\"1636\":[\"\",1631,\" \"],\"1637\":[\"\",1632,\" \"],\"1638\":[\"\",1633,\" \"],\"1639\":[\"\",1634,\" \"],\"1640\":[\"\",1635,\" \"],\"1641\":[\"\",1636,\" \"],\"1642\":[\"\",1637,\" \"],\"1643\":[\"\",1638,\" \"],\"1644\":[\"\",1639,\" \"],\"1645\":[\"\",1640,\" \"],\"1646\":[\"\",1641,\" \"],\"1647\":[\"\",1642,\" \"],\"1648\":[\"\",1643,\" \"],\"1649\":[\"\",1644,\" \"],\"1650\":[\"\",1645,\" \"],\"1651\":[\"\",1646,\" \"],\"1652\":[\"\",1647,\" \"],\"1653\":[\"\",1648,\" \"],\"1654\":[\"\",1649,\" \"],\"1655\":[\"\",1650,\" \"],\"1656\":[\"\",1651,\" \"],\"1657\":[\"\",1652,\" \"],\"1658\":[\"\",1653,\" \"],\"1659\":[\"\",1654,\" \"],\"1660\":[\"\",1655,\" \"],\"1661\":[\"\",1656,\" \"],\"1662\":[\"\",1657,\" \"],\"1663\":[\"\",1658,\" \"],\"1664\":[\"\",1659,\" \"],\"1665\":[\"\",1660,\" \"],\"1666\":[\"\",1661,\" \"],\"1667\":[\"\",1662,\" \"],\"1668\":[\"\",1663,\" \"],\"1669\":[\"\",1664,\" \"],\"1670\":[\"\",1665,\" \"],\"1671\":[\"\",1666,\" \"],\"1672\":[\"\",1667,\" \"],\"1673\":[\"\",1668,\" \"],\"1674\":[\"\",1669,\" \"],\"1675\":[\"\",1670,\" \"],\"1676\":[\"\",1671,\" \"],\"1677\":[\"\",1672,\" \"],\"1678\":[\"\",1673,\" \"],\"1679\":[\"\",1674,\" \"],\"1680\":[\"\",1675,\" \"],\"1681\":[\"\",1676,\" \"],\"1682\":[\"\",1677,\" \"],\"1683\":[\"\",1678,\" \"],\"1684\":[\"\",1679,\" \"],\"1685\":[\"\",1680,\" \"],\"1686\":[\"\",1681,\" \"],\"1687\":[\"\",1682,\" \"],\"1688\":[\"\",1683,\" \"],\"1689\":[\"\",1684,\" \"],\"1690\":[\"\",1685,\" \"],\"1691\":[\"\",1686,\" \"],\"1692\":[\"\",1687,\" \"],\"1693\":[\"\",1688,\" \"],\"1694\":[\"\",1689,\" \"],\"1695\":[\"\",1690,\" \"],\"1696\":[\"\",1691,\" \"],\"1697\":[\"\",1692,\" \"],\"1698\":[\"\",1693,\" \"],\"1699\":[\"\",1694,\" \"],\"1700\":[\"\",1695,\" \"],\"1701\":[\"\",1696,\" \"],\"1702\":[\"\",1697,\" \"],\"1703\":[\"\",1698,\" \"],\"1704\":[\"\",1699,\" \"],\"1705\":[\"\",1700,\" \"],\"1706\":[\"\",1701,\" \"],\"1707\":[\"\",1702,\" \"],\"1708\":[\"\",1703,\" \"],\"1709\":[\"\",1704,\" \"],\"1710\":[\"\",1705,\" \"],\"1711\":[\"\",1706,\" \"],\"1712\":[\"\",1707,\" \"],\"1713\":[\"\",1708,\" \"],\"1714\":[\"\",1709,\" \"],\"1715\":[\"\",1710,\" \"],\"1716\":[\"\",1711,\" \"],\"1717\":[\"\",1712,\" \"],\"1718\":[\"\",1713,\" \"],\"1719\":[\"\",1714,\" \"],\"1720\":[\"\",1715,\" \"],\"1721\":[\"\",1716,\" \"],\"1722\":[\"\",1717,\" \"],\"1723\":[\"\",1718,\" \"],\"1724\":[\"\",1719,\" \"],\"1725\":[\"\",1720,\" \"],\"1726\":[\"\",1721,\" \"],\"1727\":[\"\",1722,\" \"],\"1728\":[\"\",1723,\" \"],\"1729\":[\"\",1724,\" \"],\"1730\":[\"\",1725,\" \"],\"1731\":[\"\",1726,\" \"],\"1732\":[\"\",1727,\" \"],\"1733\":[\"\",1728,\" \"],\"1734\":[\"\",1729,\" \"],\"1735\":[\"\",1730,\" \"],\"1736\":[\"\",1731,\" \"],\"1737\":[\"\",1732,\" \"],\"1738\":[\"\",1733,\" \"],\"1739\":[\"\",1734,\" \"],\"1740\":[\"\",1735,\" \"],\"1741\":[\"\",1736,\" \"],\"1742\":[\"\",1737,\" \"],\"1743\":[\"\",1738,\" \"],\"1744\":[\"\",1739,\" \"],\"1745\":[\"\",1740,\" 
\"],\"1746\":[\"\",1741,\" \"],\"1747\":[\"\",1742,\" \"],\"1748\":[\"\",1743,\" \"],\"1749\":[\"\",1744,\" \"],\"1750\":[\"\",1745,\" \"],\"1751\":[\"\",1746,\" \"],\"1752\":[\"\",1747,\" \"],\"1753\":[\"\",1748,\" \"],\"1754\":[\"\",1749,\" \"],\"1755\":[\"\",1750,\" \"],\"1756\":[\"\",1751,\" \"],\"1757\":[\"\",1752,\" \"],\"1758\":[\"\",1753,\" \"],\"1759\":[\"\",1754,\" \"],\"1760\":[\"\",1755,\" \"],\"1761\":[\"\",1756,\" \"],\"1762\":[\"\",1757,\" \"],\"1763\":[\"\",1758,\" \"],\"1764\":[\"\",1759,\" \"],\"1765\":[\"\",1760,\" \"],\"1766\":[\"\",1761,\" \"],\"1767\":[\"\",1762,\" \"],\"1768\":[\"\",1763,\" \"],\"1769\":[\"\",1764,\" \"],\"1770\":[\"\",1765,\" \"],\"1771\":[\"\",1766,\" \"],\"1772\":[\"\",1767,\" \"],\"1773\":[\"\",1768,\" \"],\"1774\":[\"\",1769,\" \"],\"1775\":[\"\",1770,\" \"],\"1776\":[\"\",1771,\" \"],\"1777\":[\"\",1772,\" \"],\"1778\":[\"\",1773,\" \"],\"1779\":[\"\",1774,\" \"],\"1780\":[\"\",1775,\" \"],\"1781\":[\"\",1776,\" \"],\"1782\":[\"\",1777,\" \"],\"1783\":[\"\",1778,\" \"],\"1784\":[\"\",1779,\" \"],\"1785\":[\"\",1780,\" \"],\"1786\":[\"\",1781,\" \"],\"1787\":[\"\",1782,\" \"],\"1788\":[\"\",1783,\" \"],\"1789\":[\"\",1784,\" \"],\"1790\":[\"\",1785,\" \"],\"1791\":[\"\",1786,\" \"],\"1792\":[\"\",1787,\" \"],\"1793\":[\"\",1788,\" \"],\"1794\":[\"\",1789,\" \"],\"1795\":[\"\",1790,\" \"],\"1796\":[\"\",1791,\" \"],\"1797\":[\"\",1792,\" \"],\"1798\":[\"\",1793,\" \"],\"1799\":[\"\",1794,\" \"],\"1800\":[\"\",1795,\" \"],\"1801\":[\"\",1796,\" \"],\"1802\":[\"\",1797,\" \"],\"1803\":[\"\",1798,\" \"],\"1804\":[\"\",1799,\" \"],\"1805\":[\"\",1800,\" \"],\"1806\":[\"\",1801,\" \"],\"1807\":[\"\",1802,\" \"],\"1808\":[\"\",1803,\" \"],\"1809\":[\"\",1804,\" \"],\"1810\":[\"\",1805,\" \"],\"1811\":[\"\",1806,\" \"],\"1812\":[\"\",1807,\" \"],\"1813\":[\"\",1808,\" \"],\"1814\":[\"\",1809,\" \"],\"1815\":[\"\",1810,\" \"],\"1816\":[\"\",1811,\" \"],\"1817\":[\"\",1812,\" \"],\"1818\":[\"\",1813,\" \"],\"1819\":[\"\",1814,\" \"],\"1820\":[\"\",1815,\" \"],\"1821\":[\"\",1816,\" \"],\"1822\":[\"\",1817,\" \"],\"1823\":[\"\",1818,\" \"],\"1824\":[\"\",1819,\" \"],\"1825\":[\"\",1820,\" \"],\"1826\":[\"\",1821,\" \"],\"1827\":[\"\",1822,\" \"],\"1828\":[\"\",1823,\" \"],\"1829\":[\"\",1824,\" \"],\"1830\":[\"\",1825,\" \"],\"1831\":[\"\",1826,\" \"],\"1832\":[\"\",1827,\" \"],\"1833\":[\"\",1828,\" \"],\"1834\":[\"\",1829,\" \"],\"1835\":[\"\",1830,\" \"],\"1836\":[\"\",1831,\" \"],\"1837\":[\"\",1832,\" \"],\"1838\":[\"\",1833,\" \"],\"1839\":[\"\",1834,\" \"],\"1840\":[\"\",1835,\" \"],\"1841\":[\"\",1836,\" \"],\"1842\":[\"\",1837,\" \"],\"1843\":[\"\",1838,\" \"],\"1844\":[\"\",1839,\" \"],\"1845\":[\"\",1840,\" \"],\"1846\":[\"\",1841,\" \"],\"1847\":[\"\",1842,\" \"],\"1848\":[\"\",1843,\" \"],\"1849\":[\"\",1844,\" \"],\"1850\":[\"\",1845,\" \"],\"1851\":[\"\",1846,\" \"],\"1852\":[\"\",1847,\" \"],\"1853\":[\"\",1848,\" \"],\"1854\":[\"\",1849,\" \"],\"1855\":[\"\",1850,\" \"],\"1856\":[\"\",1851,\" \"],\"1857\":[\"\",1852,\" \"],\"1858\":[\"\",1853,\" \"],\"1859\":[\"\",1854,\" \"],\"1860\":[\"\",1855,\" \"],\"1861\":[\"\",1856,\" \"],\"1862\":[\"\",1857,\" \"],\"1863\":[\"\",1858,\" \"],\"1864\":[\"\",1859,\" \"],\"1865\":[\"\",1860,\" \"],\"1866\":[\"\",1861,\" \"],\"1867\":[\"\",1862,\" \"],\"1868\":[\"\",1863,\" \"],\"1869\":[\"\",1864,\" \"],\"1870\":[\"\",1865,\" \"],\"1871\":[\"\",1866,\" \"],\"1872\":[\"\",1867,\" \"],\"1873\":[\"\",1868,\" \"],\"1874\":[\"\",1869,\" \"],\"1875\":[\"\",1870,\" \"],\"1876\":[\"\",1871,\" 
\"],\"1877\":[\"\",1872,\" \"],\"1878\":[\"\",1873,\" \"],\"1879\":[\"\",1874,\" \"],\"1880\":[\"\",1875,\" \"],\"1881\":[\"\",1876,\" \"],\"1882\":[\"\",1877,\" \"],\"1883\":[\"\",1878,\" \"],\"1884\":[\"\",1879,\" \"],\"1885\":[\"\",1880,\" \"],\"1886\":[\"\",1881,\" \"],\"1887\":[\"\",1882,\" \"],\"1888\":[\"\",1883,\" \"],\"1889\":[\"\",1884,\" \"],\"1890\":[\"\",1885,\" \"],\"1891\":[\"\",1886,\" \"],\"1892\":[\"\",1887,\" \"],\"1893\":[\"\",1888,\" \"],\"1894\":[\"\",1889,\" \"],\"1895\":[\"\",1890,\" \"],\"1896\":[\"\",1891,\" \"],\"1897\":[\"\",1892,\" \"],\"1898\":[\"\",1893,\" \"],\"1899\":[\"\",1894,\" \"],\"1900\":[\"\",1895,\" \"],\"1901\":[\"\",1896,\" \"],\"1902\":[\"\",1897,\" \"],\"1903\":[\"\",1898,\" \"],\"1904\":[\"\",1899,\" \"],\"1905\":[\"\",1900,\" \"],\"1906\":[\"\",1901,\" \"],\"1907\":[\"\",1902,\" \"],\"1908\":[\"\",1903,\" \"],\"1909\":[\"\",1904,\" \"],\"1910\":[\"\",1905,\" \"],\"1911\":[\"\",1906,\" \"],\"1912\":[\"\",1907,\" \"],\"1913\":[\"\",1908,\" \"],\"1914\":[\"\",1909,\" \"],\"1915\":[\"\",1910,\" \"],\"1916\":[\"\",1911,\" \"],\"1917\":[\"\",1912,\" \"],\"1918\":[\"\",1913,\" \"],\"1919\":[\"\",1914,\" \"],\"1920\":[\"\",1915,\" \"],\"1921\":[\"\",1916,\" \"],\"1922\":[\"\",1917,\" \"],\"1923\":[\"\",1918,\" \"],\"1924\":[\"\",1919,\" \"],\"1925\":[\"\",1920,\" \"],\"1926\":[\"\",1921,\" \"],\"1927\":[\"\",1922,\" \"],\"1928\":[\"\",1923,\" \"],\"1929\":[\"\",1924,\" \"],\"1930\":[\"\",1925,\" \"],\"1931\":[\"\",1926,\" \"],\"1932\":[\"\",1927,\" \"],\"1933\":[\"\",1928,\" \"],\"1934\":[\"\",1929,\" \"],\"1935\":[\"\",1930,\" \"],\"1936\":[\"\",1931,\" \"],\"1937\":[\"\",1932,\" \"],\"1938\":[\"\",1933,\" \"],\"1939\":[\"\",1934,\" \"],\"1940\":[\"\",1935,\" \"],\"1941\":[\"\",1936,\" \"],\"1942\":[\"\",1937,\" \"],\"1943\":[\"\",1938,\" \"],\"1944\":[\"\",1939,\" \"],\"1945\":[\"\",1940,\" \"],\"1946\":[\"\",1941,\" \"],\"1947\":[\"\",1942,\" \"],\"1948\":[\"\",1943,\" \"],\"1949\":[\"\",1944,\" \"],\"1950\":[\"\",1945,\" \"],\"1951\":[\"\",1946,\" \"],\"1952\":[\"\",1947,\" \"],\"1953\":[\"\",1948,\" \"],\"1954\":[\"\",1949,\" \"],\"1955\":[\"\",1950,\" \"],\"1956\":[\"\",1951,\" \"],\"1957\":[\"\",1952,\" \"],\"1958\":[\"\",1953,\" \"],\"1959\":[\"\",1954,\" \"],\"1960\":[\"\",1955,\" \"],\"1961\":[\"\",1956,\" \"],\"1962\":[\"\",1957,\" \"],\"1963\":[\"\",1958,\" \"],\"1964\":[\"\",1959,\" \"],\"1965\":[\"\",1960,\" \"],\"1966\":[\"\",1961,\" \"],\"1967\":[\"\",1962,\" \"],\"1968\":[\"\",1963,\" \"],\"1969\":[\"\",1964,\" \"],\"1970\":[\"\",1965,\" \"],\"1971\":[\"\",1966,\" \"],\"1972\":[\"\",1967,\" \"],\"1973\":[\"\",1968,\" \"],\"1974\":[\"\",1969,\" \"],\"1975\":[\"\",1970,\" \"],\"1976\":[\"\",1971,\" \"],\"1977\":[\"\",1972,\" \"],\"1978\":[\"\",1973,\" \"],\"1979\":[\"\",1974,\" \"],\"1980\":[\"\",1975,\" \"],\"1981\":[\"\",1976,\" \"],\"1982\":[\"\",1977,\" \"],\"1983\":[\"\",1978,\" \"],\"1984\":[\"\",1979,\" \"],\"1985\":[\"\",1980,\" \"],\"1986\":[\"\",1981,\" \"],\"1987\":[\"\",1982,\" \"],\"1988\":[\"\",1983,\" \"],\"1989\":[\"\",1984,\" \"],\"1990\":[\"\",1985,\" \"],\"1991\":[\"\",1986,\" \"],\"1992\":[\"\",1987,\" \"],\"1993\":[\"\",1988,\" \"],\"1994\":[\"\",1989,\" \"],\"1995\":[\"\",1990,\" \"],\"1996\":[\"\",1991,\" \"],\"1997\":[\"\",1992,\" \"],\"1998\":[\"\",1993,\" \"],\"1999\":[\"\",1994,\" \"],\"2000\":[\"\",1995,\" \"],\"2001\":[\"\",1996,\" \"],\"2002\":[\"\",1997,\" \"],\"2003\":[\"\",1998,\" \"],\"2004\":[\"\",1999,\" \"],\"2005\":[\"\",2000,\" \"],\"2006\":[\"\",2001,\" \"],\"2007\":[\"\",2002,\" 
\"],\"2008\":[\"\",2003,\" \"],\"2009\":[\"\",2004,\" \"],\"2010\":[\"\",2005,\" \"],\"2011\":[\"\",2006,\" \"],\"2012\":[\"\",2007,\" \"],\"2013\":[\"\",2008,\" \"],\"2014\":[\"\",2009,\" \"],\"2015\":[\"\",2010,\" \"],\"2016\":[\"\",2011,\" \"],\"2017\":[\"\",2012,\" \"],\"2018\":[\"\",2013,\" \"],\"2019\":[\"\",2014,\" \"],\"2020\":[\"\",2015,\" \"],\"2021\":[\"\",2016,\" \"],\"2022\":[\"\",2017,\" \"],\"2023\":[\"\",2018,\" \"],\"2024\":[\"\",2019,\" \"],\"2025\":[\"\",2020,\" \"],\"2026\":[\"\",2021,\" \"],\"2027\":[\"\",2022,\" \"],\"2028\":[\"\",2023,\" \"],\"2029\":[\"\",2024,\" \"],\"2030\":[\"\",2025,\" \"],\"2031\":[\"\",2026,\" \"],\"2032\":[\"\",2027,\" \"],\"2033\":[\"\",2028,\" \"],\"2034\":[\"\",2029,\" \"],\"2035\":[\"\",2030,\" \"],\"2036\":[\"\",2031,\" \"],\"2037\":[\"\",2032,\" \"],\"2038\":[\"\",2033,\" \"],\"2039\":[\"\",2034,\" \"],\"2040\":[\"\",2035,\" \"],\"2041\":[\"\",2036,\" \"],\"2042\":[\"\",2037,\" \"],\"2043\":[\"\",2038,\" \"],\"2044\":[\"\",2039,\" \"],\"2045\":[\"\",2040,\" \"],\"2046\":[\"\",2041,\" \"],\"2047\":[\"\",2042,\" \"],\"2048\":[\"\",2043,\" \"],\"2049\":[\"\",2044,\" \"],\"2050\":[\"\",2045,\" \"],\"2051\":[\"\",2046,\" \"],\"2052\":[\"\",2047,\" \"],\"2053\":[\"\",2048,\" \"],\"2054\":[\"\",2049,\" \"],\"2055\":[\"\",2050,\" \"],\"2056\":[\"\",2051,\" \"],\"2057\":[\"\",2052,\" \"],\"2058\":[\"\",2053,\" \"],\"2059\":[\"\",2054,\" \"],\"2060\":[\"\",2055,\" \"],\"2061\":[\"\",2056,\" \"],\"2062\":[\"\",2057,\" \"],\"2063\":[\"\",2058,\" \"],\"2064\":[\"\",2059,\" \"],\"2065\":[\"\",2060,\" \"],\"2066\":[\"\",2061,\" \"],\"2067\":[\"\",2062,\" \"],\"2068\":[\"\",2063,\" \"],\"2069\":[\"\",2064,\" \"],\"2070\":[\"\",2065,\" \"],\"2071\":[\"\",2066,\" \"],\"2072\":[\"\",2067,\" \"],\"2073\":[\"\",2068,\" \"],\"2074\":[\"\",2069,\" \"],\"2075\":[\"\",2070,\" \"],\"2076\":[\"\",2071,\" \"],\"2077\":[\"\",2072,\" \"],\"2078\":[\"\",2073,\" \"],\"2079\":[\"\",2074,\" \"],\"2080\":[\"\",2075,\" \"],\"2081\":[\"\",2076,\" \"],\"2082\":[\"\",2077,\" \"],\"2083\":[\"\",2078,\" \"],\"2084\":[\"\",2079,\" \"],\"2085\":[\"\",2080,\" \"],\"2086\":[\"\",2081,\" \"],\"2087\":[\"\",2082,\" \"],\"2088\":[\"\",2083,\" \"],\"2089\":[\"\",2084,\" \"],\"2090\":[\"\",2085,\" \"],\"2091\":[\"\",2086,\" \"],\"2092\":[\"\",2087,\" \"],\"2093\":[\"\",2088,\" \"],\"2094\":[\"\",2089,\" \"],\"2095\":[\"\",2090,\" \"],\"2096\":[\"\",2091,\" \"],\"2097\":[\"\",2092,\" \"],\"2098\":[\"\",2093,\" \"],\"2099\":[\"\",2094,\" \"],\"2100\":[\"\",2095,\" \"],\"2101\":[\"\",2096,\" \"],\"2102\":[\"\",2097,\" \"],\"2103\":[\"\",2098,\" \"],\"2104\":[\"\",2099,\" \"],\"2105\":[\"\",2100,\" \"],\"2106\":[\"\",2101,\" \"],\"2107\":[\"\",2102,\" \"],\"2108\":[\"\",2103,\" \"],\"2109\":[\"\",2104,\" \"],\"2110\":[\"\",2105,\" \"],\"2111\":[\"\",2106,\" \"],\"2112\":[\"\",2107,\" \"],\"2113\":[\"\",2108,\" \"],\"2114\":[\"\",2109,\" \"],\"2115\":[\"\",2110,\" \"],\"2116\":[\"\",2111,\" \"],\"2117\":[\"\",2112,\" \"],\"2118\":[\"\",2113,\" \"],\"2119\":[\"\",2114,\" \"],\"2120\":[\"\",2115,\" \"],\"2121\":[\"\",2116,\" \"],\"2122\":[\"\",2117,\" \"],\"2123\":[\"\",2118,\" \"],\"2124\":[\"\",2119,\" \"],\"2125\":[\"\",2120,\" \"],\"2126\":[\"\",2121,\" \"],\"2127\":[\"\",2122,\" \"],\"2128\":[\"\",2123,\" \"],\"2129\":[\"\",2124,\" \"],\"2130\":[\"\",2125,\" \"],\"2131\":[\"\",2126,\" \"],\"2132\":[\"\",2127,\" \"],\"2133\":[\"\",2128,\" \"],\"2134\":[\"\",2129,\" \"],\"2135\":[\"\",2130,\" \"],\"2136\":[\"\",2131,\" \"],\"2137\":[\"\",2132,\" \"],\"2138\":[\"\",2133,\" 
\"],\"2139\":[\"\",2134,\" \"],\"2140\":[\"\",2135,\" \"],\"2141\":[\"\",2136,\" \"],\"2142\":[\"\",2137,\" \"],\"2143\":[\"\",2138,\" \"],\"2144\":[\"\",2139,\" \"],\"2145\":[\"\",2140,\" \"],\"2146\":[\"\",2141,\" \"],\"2147\":[\"\",2142,\" \"],\"2148\":[\"\",2143,\" \"],\"2149\":[\"\",2144,\" \"],\"2150\":[\"\",2145,\" \"],\"2151\":[\"\",2146,\" \"],\"2152\":[\"\",2147,\" \"],\"2153\":[\"\",2148,\" \"],\"2154\":[\"\",2149,\" \"],\"2155\":[\"\",2150,\" \"],\"2156\":[\"\",2151,\" \"],\"2157\":[\"\",2152,\" \"],\"2158\":[\"\",2153,\" \"],\"2159\":[\"\",2154,\" \"],\"2160\":[\"\",2155,\" \"],\"2161\":[\"\",2156,\" \"],\"2162\":[\"\",2157,\" \"],\"2163\":[\"\",2158,\" \"],\"2164\":[\"\",2159,\" \"],\"2165\":[\"\",2160,\" \"],\"2166\":[\"\",2161,\" \"],\"2167\":[\"\",2162,\" \"],\"2168\":[\"\",2163,\" \"],\"2169\":[\"\",2164,\" \"],\"2170\":[\"\",2165,\" \"],\"2171\":[\"\",2166,\" \"],\"2172\":[\"\",2167,\" \"],\"2173\":[\"\",2168,\" \"],\"2174\":[\"\",2169,\" \"],\"2175\":[\"\",2170,\" \"],\"2176\":[\"\",2171,\" \"],\"2177\":[\"\",2172,\" \"],\"2178\":[\"\",2173,\" \"],\"2179\":[\"\",2174,\" \"],\"2180\":[\"\",2175,\" \"],\"2181\":[\"\",2176,\" \"],\"2182\":[\"\",2177,\" \"],\"2183\":[\"\",2178,\" \"],\"2184\":[\"\",2179,\" \"],\"2185\":[\"\",2180,\" \"],\"2186\":[\"\",2181,\" \"],\"2187\":[\"\",2182,\" \"],\"2188\":[\"\",2183,\" \"],\"2189\":[\"\",2184,\" \"],\"2190\":[\"\",2185,\" \"],\"2191\":[\"\",2186,\" \"],\"2192\":[\"\",2187,\" \"],\"2193\":[\"\",2188,\" \"],\"2194\":[\"\",2189,\" \"],\"2195\":[\"\",2190,\" \"],\"2196\":[\"\",2191,\" \"],\"2197\":[\"\",2192,\" \"],\"2198\":[\"\",2193,\" \"],\"2199\":[\"\",2194,\" \"],\"2200\":[\"\",2195,\" \"],\"2201\":[\"\",2196,\" \"],\"2202\":[\"\",2197,\" \"],\"2203\":[\"\",2198,\" \"],\"2204\":[\"\",2199,\" \"],\"2205\":[\"\",2200,\" \"],\"2206\":[\"\",2201,\" \"],\"2207\":[\"\",2202,\" \"],\"2208\":[\"\",2203,\" \"],\"2209\":[\"\",2204,\" \"],\"2210\":[\"\",2205,\" \"],\"2211\":[\"\",2206,\" \"],\"2212\":[\"\",2207,\" \"],\"2213\":[\"\",2208,\" \"],\"2214\":[\"\",2209,\" \"],\"2215\":[\"\",2210,\" \"],\"2216\":[\"\",2211,\" \"],\"2217\":[\"\",2212,\" \"],\"2218\":[\"\",2213,\" \"],\"2219\":[\"\",2214,\" \"],\"2220\":[\"\",2215,\" \"],\"2221\":[\"\",2216,\" \"],\"2222\":[\"\",2217,\" \"],\"2223\":[\"\",2218,\" \"],\"2224\":[\"\",2219,\" \"],\"2225\":[\"\",2220,\" \"],\"2226\":[\"\",2221,\" \"],\"2227\":[\"\",2222,\" \"],\"2228\":[\"\",2223,\" \"],\"2229\":[\"\",2224,\" \"],\"2230\":[\"\",2225,\" \"],\"2231\":[\"\",2226,\" \"],\"2232\":[\"\",2227,\" \"],\"2233\":[\"\",2228,\" \"],\"2234\":[\"\",2229,\" \"],\"2235\":[\"\",2230,\" \"],\"2236\":[\"\",2231,\" \"],\"2237\":[\"\",2232,\" \"],\"2238\":[\"\",2233,\" \"],\"2239\":[\"\",2234,\" \"],\"2240\":[\"\",2235,\" \"],\"2241\":[\"\",2236,\" \"],\"2242\":[\"\",2237,\" \"],\"2243\":[\"\",2238,\" \"],\"2244\":[\"\",2239,\" \"],\"2245\":[\"\",2240,\" \"],\"2246\":[\"\",2241,\" \"],\"2247\":[\"\",2242,\" \"],\"2248\":[\"\",2243,\" \"],\"2249\":[\"\",2244,\" \"],\"2250\":[\"\",2245,\" \"],\"2251\":[\"\",2246,\" \"],\"2252\":[\"\",2247,\" \"],\"2253\":[\"\",2248,\" \"],\"2254\":[\"\",2249,\" \"],\"2255\":[\"\",2250,\" \"],\"2256\":[\"\",2251,\" \"],\"2257\":[\"\",2252,\" \"],\"2258\":[\"\",2253,\" \"],\"2259\":[\"\",2254,\" \"],\"2260\":[\"\",2255,\" \"],\"2261\":[\"\",2256,\" \"],\"2262\":[\"\",2257,\" \"],\"2263\":[\"\",2258,\" \"],\"2264\":[\"\",2259,\" \"],\"2265\":[\"\",2260,\" \"],\"2266\":[\"\",2261,\" \"],\"2267\":[\"\",2262,\" \"],\"2268\":[\"\",2263,\" \"],\"2269\":[\"\",2264,\" 
\"],\"2270\":[\"\",2265,\" \"],\"2271\":[\"\",2266,\" \"],\"2272\":[\"\",2267,\" \"],\"2273\":[\"\",2268,\" \"],\"2274\":[\"\",2269,\" \"],\"2275\":[\"\",2270,\" \"],\"2276\":[\"\",2271,\" \"],\"2277\":[\"\",2272,\" \"],\"2278\":[\"\",2273,\" \"],\"2279\":[\"\",2274,\" \"],\"2280\":[\"\",2275,\" \"],\"2281\":[\"\",2276,\" \"],\"2282\":[\"\",2277,\" \"],\"2283\":[\"\",2278,\" \"],\"2284\":[\"\",2279,\" \"],\"2285\":[\"\",2280,\" \"],\"2286\":[\"\",2281,\" \"],\"2287\":[\"\",2282,\" \"],\"2288\":[\"\",2283,\" \"],\"2289\":[\"\",2284,\" \"],\"2290\":[\"\",2285,\" \"],\"2291\":[\"\",2286,\" \"],\"2292\":[\"\",2287,\" \"],\"2293\":[\"\",2288,\" \"],\"2294\":[\"\",2289,\" \"],\"2295\":[\"\",2290,\" \"],\"2296\":[\"\",2291,\" \"],\"2297\":[\"\",2292,\" \"],\"2298\":[\"\",2293,\" \"],\"2299\":[\"\",2294,\" \"],\"2300\":[\"\",2295,\" \"],\"2301\":[\"\",2296,\" \"],\"2302\":[\"\",2297,\" \"],\"2303\":[\"\",2298,\" \"],\"2304\":[\"\",2299,\" \"],\"2305\":[\"\",2300,\" \"],\"2306\":[\"\",2301,\" \"],\"2307\":[\"\",2302,\" \"],\"2308\":[\"\",2303,\" \"],\"2309\":[\"\",2304,\" \"],\"2310\":[\"\",2305,\" \"],\"2311\":[\"\",2306,\" \"],\"2312\":[\"\",2307,\" \"],\"2313\":[\"\",2308,\" \"],\"2314\":[\"\",2309,\" \"],\"2315\":[\"\",2310,\" \"],\"2316\":[\"\",2311,\" \"],\"2317\":[\"\",2312,\" \"],\"2318\":[\"\",2313,\" \"],\"2319\":[\"\",2314,\" \"],\"2320\":[\"\",2315,\" \"],\"2321\":[\"\",2316,\" \"],\"2322\":[\"\",2317,\" \"],\"2323\":[\"\",2318,\" \"],\"2324\":[\"\",2319,\" \"],\"2325\":[\"\",2320,\" \"],\"2326\":[\"\",2321,\" \"],\"2327\":[\"\",2322,\" \"],\"2328\":[\"\",2323,\" \"],\"2329\":[\"\",2324,\" \"],\"2330\":[\"\",2325,\" \"],\"2331\":[\"\",2326,\" \"],\"2332\":[\"\",2327,\" \"],\"2333\":[\"\",2328,\" \"],\"2334\":[\"\",2329,\" \"],\"2335\":[\"\",2330,\" \"],\"2336\":[\"\",2331,\" \"],\"2337\":[\"\",2332,\" \"],\"2338\":[\"\",2333,\" \"],\"2339\":[\"\",2334,\" \"],\"2340\":[\"\",2335,\" \"],\"2341\":[\"\",2336,\" \"],\"2342\":[\"\",2337,\" \"],\"2343\":[\"\",2338,\" \"],\"2344\":[\"\",2339,\" \"],\"2345\":[\"\",2340,\" \"],\"2346\":[\"\",2341,\" \"],\"2347\":[\"\",2342,\" \"],\"2348\":[\"\",2343,\" \"],\"2349\":[\"\",2344,\" \"],\"2350\":[\"\",2345,\" \"],\"2351\":[\"\",2346,\" \"],\"2352\":[\"\",2347,\" \"],\"2353\":[\"\",2348,\" \"],\"2354\":[\"\",2349,\" \"],\"2355\":[\"\",2350,\" \"],\"2356\":[\"\",2351,\" \"],\"2357\":[\"\",2352,\" \"],\"2358\":[\"\",2353,\" \"],\"2359\":[\"\",2354,\" \"],\"2360\":[\"\",2355,\" \"],\"2361\":[\"\",2356,\" \"],\"2362\":[\"\",2357,\" \"],\"2363\":[\"\",2358,\" \"],\"2364\":[\"\",2359,\" \"],\"2365\":[\"\",2360,\" \"],\"2366\":[\"\",2361,\" \"],\"2367\":[\"\",2362,\" \"],\"2368\":[\"\",2363,\" \"],\"2369\":[\"\",2364,\" \"],\"2370\":[\"\",2365,\" \"],\"2371\":[\"\",2366,\" \"],\"2372\":[\"\",2367,\" \"],\"2373\":[\"\",2368,\" \"],\"2374\":[\"\",2369,\" \"],\"2375\":[\"\",2370,\" \"],\"2376\":[\"\",2371,\" \"],\"2377\":[\"\",2372,\" \"],\"2378\":[\"\",2373,\" \"],\"2379\":[\"\",2374,\" \"],\"2380\":[\"\",2375,\" \"],\"2381\":[\"\",2376,\" \"],\"2382\":[\"\",2377,\" \"],\"2383\":[\"\",2378,\" \"],\"2384\":[\"\",2379,\" \"],\"2385\":[\"\",2380,\" \"],\"2386\":[\"\",2381,\" \"],\"2387\":[\"\",2382,\" \"],\"2388\":[\"\",2383,\" \"],\"2389\":[\"\",2384,\" \"],\"2390\":[\"\",2385,\" \"],\"2391\":[\"\",2386,\" \"],\"2392\":[\"\",2387,\" \"],\"2393\":[\"\",2388,\" \"],\"2394\":[\"\",2389,\" \"],\"2395\":[\"\",2390,\" \"],\"2396\":[\"\",2391,\" \"],\"2397\":[\"\",2392,\" \"],\"2398\":[\"\",2393,\" \"],\"2399\":[\"\",2394,\" \"],\"2400\":[\"\",2395,\" 
\"],\"2401\":[\"\",2396,\" \"],\"2402\":[\"\",2397,\" \"],\"2403\":[\"\",2398,\" \"],\"2404\":[\"\",2399,\" \"],\"2405\":[\"\",2400,\" \"],\"2406\":[\"\",2401,\" \"],\"2407\":[\"\",2402,\" \"],\"2408\":[\"\",2403,\" \"],\"2409\":[\"\",2404,\" \"],\"2410\":[\"\",2405,\" \"],\"2411\":[\"\",2406,\" \"],\"2412\":[\"\",2407,\" \"],\"2413\":[\"\",2408,\" \"],\"2414\":[\"\",2409,\" \"],\"2415\":[\"\",2410,\" \"],\"2416\":[\"\",2411,\" \"],\"2417\":[\"\",2412,\" \"],\"2418\":[\"\",2413,\" \"],\"2419\":[\"\",2414,\" \"],\"2420\":[\"\",2415,\" \"],\"2421\":[\"\",2416,\" \"],\"2422\":[\"\",2417,\" \"],\"2423\":[\"\",2418,\" \"],\"2424\":[\"\",2419,\" \"],\"2425\":[\"\",2420,\" \"],\"2426\":[\"\",2421,\" \"],\"2427\":[\"\",2422,\" \"],\"2428\":[\"\",2423,\" \"],\"2429\":[\"\",2424,\" \"],\"2430\":[\"\",2425,\" \"],\"2431\":[\"\",2426,\" \"],\"2432\":[\"\",2427,\" \"],\"2433\":[\"\",2428,\" \"],\"2434\":[\"\",2429,\" \"],\"2435\":[\"\",2430,\" \"],\"2436\":[\"\",2431,\" \"],\"2437\":[\"\",2432,\" \"],\"2438\":[\"\",2433,\" \"],\"2439\":[\"\",2434,\" \"],\"2440\":[\"\",2435,\" \"],\"2441\":[\"\",2436,\" \"],\"2442\":[\"\",2437,\" \"],\"2443\":[\"\",2438,\" \"],\"2444\":[\"\",2439,\" \"],\"2445\":[\"\",2440,\" \"],\"2446\":[\"\",2441,\" \"],\"2447\":[\"\",2442,\" \"],\"2448\":[\"\",2443,\" \"],\"2449\":[\"\",2444,\" \"],\"2450\":[\"\",2445,\" \"],\"2451\":[\"\",2446,\" \"],\"2452\":[\"\",2447,\" \"],\"2453\":[\"\",2448,\" \"],\"2454\":[\"\",2449,\" \"],\"2455\":[\"\",2450,\" \"],\"2456\":[\"\",2451,\" \"],\"2457\":[\"\",2452,\" \"],\"2458\":[\"\",2453,\" \"],\"2459\":[\"\",2454,\" \"],\"2460\":[\"\",2455,\" \"],\"2461\":[\"\",2456,\" \"],\"2462\":[\"\",2457,\" \"],\"2463\":[\"\",2458,\" \"],\"2464\":[\"\",2459,\" \"],\"2465\":[\"\",2460,\" \"],\"2466\":[\"\",2461,\" \"],\"2467\":[\"\",2462,\" \"],\"2468\":[\"\",2463,\" \"],\"2469\":[\"\",2464,\" \"],\"2470\":[\"\",2465,\" \"],\"2471\":[\"\",2466,\" \"],\"2472\":[\"\",2467,\" \"],\"2473\":[\"\",2468,\" \"],\"2474\":[\"\",2469,\" \"],\"2475\":[\"\",2470,\" \"],\"2476\":[\"\",2471,\" \"],\"2477\":[\"\",2472,\" \"],\"2478\":[\"\",2473,\" \"],\"2479\":[\"\",2474,\" \"],\"2480\":[\"\",2475,\" \"],\"2481\":[\"\",2476,\" \"],\"2482\":[\"\",2477,\" \"],\"2483\":[\"\",2478,\" \"],\"2484\":[\"\",2479,\" \"],\"2485\":[\"\",2480,\" \"],\"2486\":[\"\",2481,\" \"],\"2487\":[\"\",2482,\" \"],\"2488\":[\"\",2483,\" \"],\"2489\":[\"\",2484,\" \"],\"2490\":[\"\",2485,\" \"],\"2491\":[\"\",2486,\" \"],\"2492\":[\"\",2487,\" \"],\"2493\":[\"\",2488,\" \"],\"2494\":[\"\",2489,\" \"],\"2495\":[\"\",2490,\" \"],\"2496\":[\"\",2491,\" \"],\"2497\":[\"\",2492,\" \"],\"2498\":[\"\",2493,\" \"],\"2499\":[\"\",2494,\" \"],\"2500\":[\"\",2495,\" \"],\"2501\":[\"\",2496,\" \"],\"2502\":[\"\",2497,\" \"],\"2503\":[\"\",2498,\" \"],\"2504\":[\"\",2499,\" \"],\"2505\":[\"\",2500,\" \"],\"2506\":[\"\",2501,\" \"],\"2507\":[\"\",2502,\" \"],\"2508\":[\"\",2503,\" \"],\"2509\":[\"\",2504,\" \"],\"2510\":[\"\",2505,\" \"],\"2511\":[\"\",2506,\" \"],\"2512\":[\"\",2507,\" \"],\"2513\":[\"\",2508,\" \"],\"2514\":[\"\",2509,\" \"],\"2515\":[\"\",2510,\" \"],\"2516\":[\"\",2511,\" \"],\"2517\":[\"\",2512,\" \"],\"2518\":[\"\",2513,\" \"],\"2519\":[\"\",2514,\" \"],\"2520\":[\"\",2515,\" \"],\"2521\":[\"\",2516,\" \"],\"2522\":[\"\",2517,\" \"],\"2523\":[\"\",2518,\" \"],\"2524\":[\"\",2519,\" \"],\"2525\":[\"\",2520,\" \"],\"2526\":[\"\",2521,\" \"],\"2527\":[\"\",2522,\" \"],\"2528\":[\"\",2523,\" \"],\"2529\":[\"\",2524,\" \"],\"2530\":[\"\",2525,\" \"],\"2531\":[\"\",2526,\" 
\"],\"2532\":[\"\",2527,\" \"],\"2533\":[\"\",2528,\" \"],\"2534\":[\"\",2529,\" \"],\"2535\":[\"\",2530,\" \"],\"2536\":[\"\",2531,\" \"],\"2537\":[\"\",2532,\" \"],\"2538\":[\"\",2533,\" \"],\"2539\":[\"\",2534,\" \"],\"2540\":[\"\",2535,\" \"],\"2541\":[\"\",2536,\" \"],\"2542\":[\"\",2537,\" \"],\"2543\":[\"\",2538,\" \"],\"2544\":[\"\",2539,\" \"],\"2545\":[\"\",2540,\" \"],\"2546\":[\"\",2541,\" \"],\"2547\":[\"\",2542,\" \"],\"2548\":[\"\",2543,\" \"],\"2549\":[\"\",2544,\" \"],\"2550\":[\"\",2545,\" \"],\"2551\":[\"\",2546,\" \"],\"2552\":[\"\",2547,\" \"],\"2553\":[\"\",2548,\" \"],\"2554\":[\"\",2549,\" \"],\"2555\":[\"\",2550,\" \"],\"2556\":[\"\",2551,\" \"],\"2557\":[\"\",2552,\" \"],\"2558\":[\"\",2553,\" \"],\"2559\":[\"\",2554,\" \"],\"2560\":[\"\",2555,\" \"],\"2561\":[\"\",2556,\" \"],\"2562\":[\"\",2557,\" \"],\"2563\":[\"\",2558,\" \"],\"2564\":[\"\",2559,\" \"],\"2565\":[\"\",2560,\" \"],\"2566\":[\"\",2561,\" \"],\"2567\":[\"\",2562,\" \"],\"2568\":[\"\",2563,\" \"],\"2569\":[\"\",2564,\" \"],\"2570\":[\"\",2565,\" \"],\"2571\":[\"\",2566,\" \"],\"2572\":[\"\",2567,\" \"],\"2573\":[\"\",2568,\" \"],\"2574\":[\"\",2569,\" \"],\"2575\":[\"\",2570,\" \"],\"2576\":[\"\",2571,\" \"],\"2577\":[\"\",2572,\" \"],\"2578\":[\"\",2573,\" \"],\"2579\":[\"\",2574,\" \"],\"2580\":[\"\",2575,\" \"],\"2581\":[\"\",2576,\" \"],\"2582\":[\"\",2577,\" \"],\"2583\":[\"\",2578,\" \"],\"2584\":[\"\",2579,\" \"],\"2585\":[\"\",2580,\" \"],\"2586\":[\"\",2581,\" \"],\"2587\":[\"\",2582,\" \"],\"2588\":[\"\",2583,\" \"],\"2589\":[\"\",2584,\" \"],\"2590\":[\"\",2585,\" \"],\"2591\":[\"\",2586,\" \"],\"2592\":[\"\",2587,\" \"],\"2593\":[\"\",2588,\" \"],\"2594\":[\"\",2589,\" \"],\"2595\":[\"\",2590,\" \"],\"2596\":[\"\",2591,\" \"],\"2597\":[\"\",2592,\" \"],\"2598\":[\"\",2593,\" \"],\"2599\":[\"\",2594,\" \"],\"2600\":[\"\",2595,\" \"],\"2601\":[\"\",2596,\" \"],\"2602\":[\"\",2597,\" \"],\"2603\":[\"\",2598,\" \"],\"2604\":[\"\",2599,\" \"],\"2605\":[\"\",2600,\" \"],\"2606\":[\"\",2601,\" \"],\"2607\":[\"\",2602,\" \"],\"2608\":[\"\",2603,\" \"],\"2609\":[\"\",2604,\" \"],\"2610\":[\"\",2605,\" \"],\"2611\":[\"\",2606,\" \"],\"2612\":[\"\",2607,\" \"],\"2613\":[\"\",2608,\" \"],\"2614\":[\"\",2609,\" \"],\"2615\":[\"\",2610,\" \"],\"2616\":[\"\",2611,\" \"],\"2617\":[\"\",2612,\" \"],\"2618\":[\"\",2613,\" \"],\"2619\":[\"\",2614,\" \"],\"2620\":[\"\",2615,\" \"],\"2621\":[\"\",2616,\" \"],\"2622\":[\"\",2617,\" \"],\"2623\":[\"\",2618,\" \"],\"2624\":[\"\",2619,\" \"],\"2625\":[\"\",2620,\" \"],\"2626\":[\"\",2621,\" \"],\"2627\":[\"\",2622,\" \"],\"2628\":[\"\",2623,\" \"],\"2629\":[\"\",2624,\" \"],\"2630\":[\"\",2625,\" \"],\"2631\":[\"\",2626,\" \"],\"2632\":[\"\",2627,\" \"],\"2633\":[\"\",2628,\" \"],\"2634\":[\"\",2629,\" \"],\"2635\":[\"\",2630,\" \"],\"2636\":[\"\",2631,\" \"],\"2637\":[\"\",2632,\" \"],\"2638\":[\"\",2633,\" \"],\"2639\":[\"\",2634,\" \"],\"2640\":[\"\",2635,\" \"],\"2641\":[\"\",2636,\" \"],\"2642\":[\"\",2637,\" \"],\"2643\":[\"\",2638,\" \"],\"2644\":[\"\",2639,\" \"],\"2645\":[\"\",2640,\" \"],\"2646\":[\"\",2641,\" \"],\"2647\":[\"\",2642,\" \"],\"2648\":[\"\",2643,\" \"],\"2649\":[\"\",2644,\" 
\"]}},\"oldPath\":\"mercurial\\/exchange.py\",\"currentPath\":\"mercurial\\/exchange.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"16\",\"delLines\":\"11\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"2644\",\"newLength\":\"2649\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" # exchange.py - utility to exchange data between repos.\\n #\\n # Copyright 2005-2007 Matt Mackall \\u003cmpm@selenic.com\\u003e\\n #\\n # This software may be used and distributed according to the terms of the\\n # GNU General Public License version 2 or any later version.\\n \\n from __future__ import absolute_import\\n \\n import collections\\n import hashlib\\n \\n from .i18n import _\\n from .node import (\\n     bin,\\n     hex,\\n     nullid,\\n     nullrev,\\n )\\n from .thirdparty import (\\n     attr,\\n )\\n from . import (\\n     bookmarks as bookmod,\\n     bundle2,\\n     changegroup,\\n     discovery,\\n     error,\\n+    exchangev2,\\n     lock as lockmod,\\n     logexchange,\\n     narrowspec,\\n     obsolete,\\n     phases,\\n     pushkey,\\n     pycompat,\\n     repository,\\n     scmutil,\\n     sslutil,\\n     streamclone,\\n     url as urlmod,\\n     util,\\n )\\n from .utils import (\\n     stringutil,\\n )\\n \\n urlerr = util.urlerr\\n urlreq = util.urlreq\\n \\n _NARROWACL_SECTION = 'narrowhgacl'\\n \\n # Maps bundle version human names to changegroup versions.\\n _bundlespeccgversions = {'v1': '01',\\n                          'v2': '02',\\n                          'packed1': 's1',\\n                          'bundle2': '02', #legacy\\n                         }\\n \\n # Maps bundle version with content opts to choose which part to bundle\\n _bundlespeccontentopts = {\\n     'v1': {\\n         'changegroup': True,\\n         'cg.version': '01',\\n         'obsolescence': False,\\n         'phases': False,\\n         'tagsfnodescache': False,\\n         'revbranchcache': False\\n     },\\n     'v2': {\\n         'changegroup': True,\\n         'cg.version': '02',\\n         'obsolescence': False,\\n         'phases': False,\\n         'tagsfnodescache': True,\\n         'revbranchcache': True\\n     },\\n     'packed1' : {\\n         'cg.version': 's1'\\n     }\\n }\\n _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']\\n \\n _bundlespecvariants = {\\\"streamv2\\\": {\\\"changegroup\\\": False, \\\"streamv2\\\": True,\\n                                     \\\"tagsfnodescache\\\": False,\\n                                     \\\"revbranchcache\\\": False}}\\n \\n # Compression engines allowed in version 1. 
THIS SHOULD NEVER CHANGE.\\n _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}\\n \\n @attr.s\\n class bundlespec(object):\\n     compression = attr.ib()\\n     wirecompression = attr.ib()\\n     version = attr.ib()\\n     wireversion = attr.ib()\\n     params = attr.ib()\\n     contentopts = attr.ib()\\n \\n def parsebundlespec(repo, spec, strict=True):\\n     \\\"\\\"\\\"Parse a bundle string specification into parts.\\n \\n     Bundle specifications denote a well-defined bundle\\/exchange format.\\n     The content of a given specification should not change over time in\\n     order to ensure that bundles produced by a newer version of Mercurial are\\n     readable from an older version.\\n \\n     The string currently has the form:\\n \\n        \\u003ccompression\\u003e-\\u003ctype\\u003e[;\\u003cparameter0\\u003e[;\\u003cparameter1\\u003e]]\\n \\n     Where \\u003ccompression\\u003e is one of the supported compression formats\\n     and \\u003ctype\\u003e is (currently) a version string. A \\\";\\\" can follow the type and\\n     all text afterwards is interpreted as URI encoded, \\\";\\\" delimited key=value\\n     pairs.\\n \\n     If ``strict`` is True (the default) \\u003ccompression\\u003e is required. Otherwise,\\n     it is optional.\\n \\n     Returns a bundlespec object of (compression, version, parameters).\\n     Compression will be ``None`` if not in strict mode and a compression isn't\\n     defined.\\n \\n     An ``InvalidBundleSpecification`` is raised when the specification is\\n     not syntactically well formed.\\n \\n     An ``UnsupportedBundleSpecification`` is raised when the compression or\\n     bundle type\\/version is not recognized.\\n \\n     Note: this function will likely eventually return a more complex data\\n     structure, including bundle2 part information.\\n     \\\"\\\"\\\"\\n     def parseparams(s):\\n         if ';' not in s:\\n             return s, {}\\n \\n         params = {}\\n         version, paramstr = s.split(';', 1)\\n \\n         for p in paramstr.split(';'):\\n             if '=' not in p:\\n                 raise error.InvalidBundleSpecification(\\n                     _('invalid bundle specification: '\\n                       'missing \\\"=\\\" in parameter: %s') % p)\\n \\n             key, value = p.split('=', 1)\\n             key = urlreq.unquote(key)\\n             value = urlreq.unquote(value)\\n             params[key] = value\\n \\n         return version, params\\n \\n \\n     if strict and '-' not in spec:\\n         raise error.InvalidBundleSpecification(\\n                 _('invalid bundle specification; '\\n                   'must be prefixed with compression: %s') % spec)\\n \\n     if '-' in spec:\\n         compression, version = spec.split('-', 1)\\n \\n         if compression not in util.compengines.supportedbundlenames:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('%s compression is not supported') % compression)\\n \\n         version, params = parseparams(version)\\n \\n         if version not in _bundlespeccgversions:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('%s is not a recognized bundle version') % version)\\n     else:\\n         # Value could be just the compression or just the version, in which\\n         # case some defaults are assumed (but only when not in strict mode).\\n         assert not strict\\n \\n         spec, params = parseparams(spec)\\n \\n         if spec in util.compengines.supportedbundlenames:\\n     
        compression = spec\\n             version = 'v1'\\n             # Generaldelta repos require v2.\\n             if 'generaldelta' in repo.requirements:\\n                 version = 'v2'\\n             # Modern compression engines require v2.\\n             if compression not in _bundlespecv1compengines:\\n                 version = 'v2'\\n         elif spec in _bundlespeccgversions:\\n             if spec == 'packed1':\\n                 compression = 'none'\\n             else:\\n                 compression = 'bzip2'\\n             version = spec\\n         else:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('%s is not a recognized bundle specification') % spec)\\n \\n     # Bundle version 1 only supports a known set of compression engines.\\n     if version == 'v1' and compression not in _bundlespecv1compengines:\\n         raise error.UnsupportedBundleSpecification(\\n             _('compression engine %s is not supported on v1 bundles') %\\n             compression)\\n \\n     # The specification for packed1 can optionally declare the data formats\\n     # required to apply it. If we see this metadata, compare against what the\\n     # repo supports and error if the bundle isn't compatible.\\n     if version == 'packed1' and 'requirements' in params:\\n         requirements = set(params['requirements'].split(','))\\n         missingreqs = requirements - repo.supportedformats\\n         if missingreqs:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('missing support for repository features: %s') %\\n                       ', '.join(sorted(missingreqs)))\\n \\n     # Compute contentopts based on the version\\n     contentopts = _bundlespeccontentopts.get(version, {}).copy()\\n \\n     # Process the variants\\n     if \\\"stream\\\" in params and params[\\\"stream\\\"] == \\\"v2\\\":\\n         variant = _bundlespecvariants[\\\"streamv2\\\"]\\n         contentopts.update(variant)\\n \\n     engine = util.compengines.forbundlename(compression)\\n     compression, wirecompression = engine.bundletype()\\n     wireversion = _bundlespeccgversions[version]\\n \\n     return bundlespec(compression, wirecompression, version, wireversion,\\n                       params, contentopts)\\n \\n def readbundle(ui, fh, fname, vfs=None):\\n     header = changegroup.readexactly(fh, 4)\\n \\n     alg = None\\n     if not fname:\\n         fname = \\\"stream\\\"\\n         if not header.startswith('HG') and header.startswith('\\\\0'):\\n             fh = changegroup.headerlessfixup(fh, header)\\n             header = \\\"HG10\\\"\\n             alg = 'UN'\\n     elif vfs:\\n         fname = vfs.join(fname)\\n \\n     magic, version = header[0:2], header[2:4]\\n \\n     if magic != 'HG':\\n         raise error.Abort(_('%s: not a Mercurial bundle') % fname)\\n     if version == '10':\\n         if alg is None:\\n             alg = changegroup.readexactly(fh, 2)\\n         return changegroup.cg1unpacker(fh, alg)\\n     elif version.startswith('2'):\\n         return bundle2.getunbundler(ui, fh, magicstring=magic + version)\\n     elif version == 'S1':\\n         return streamclone.streamcloneapplier(fh)\\n     else:\\n         raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))\\n \\n def getbundlespec(ui, fh):\\n     \\\"\\\"\\\"Infer the bundlespec from a bundle file handle.\\n \\n     The input file handle is seeked and the original seek position is not\\n     restored.\\n     \\\"\\\"\\\"\\n     def 
speccompression(alg):\\n         try:\\n             return util.compengines.forbundletype(alg).bundletype()[0]\\n         except KeyError:\\n             return None\\n \\n     b = readbundle(ui, fh, None)\\n     if isinstance(b, changegroup.cg1unpacker):\\n         alg = b._type\\n         if alg == '_truncatedBZ':\\n             alg = 'BZ'\\n         comp = speccompression(alg)\\n         if not comp:\\n             raise error.Abort(_('unknown compression algorithm: %s') % alg)\\n         return '%s-v1' % comp\\n     elif isinstance(b, bundle2.unbundle20):\\n         if 'Compression' in b.params:\\n             comp = speccompression(b.params['Compression'])\\n             if not comp:\\n                 raise error.Abort(_('unknown compression algorithm: %s') % comp)\\n         else:\\n             comp = 'none'\\n \\n         version = None\\n         for part in b.iterparts():\\n             if part.type == 'changegroup':\\n                 version = part.params['version']\\n                 if version in ('01', '02'):\\n                     version = 'v2'\\n                 else:\\n                     raise error.Abort(_('changegroup version %s does not have '\\n                                         'a known bundlespec') % version,\\n                                       hint=_('try upgrading your Mercurial '\\n                                               'client'))\\n             elif part.type == 'stream2' and version is None:\\n                 # A stream2 part requires to be part of a v2 bundle\\n                 version = \\\"v2\\\"\\n                 requirements = urlreq.unquote(part.params['requirements'])\\n                 splitted = requirements.split()\\n                 params = bundle2._formatrequirementsparams(splitted)\\n                 return 'none-v2;stream=v2;%s' % params\\n \\n         if not version:\\n             raise error.Abort(_('could not identify changegroup version in '\\n                                 'bundle'))\\n \\n         return '%s-%s' % (comp, version)\\n     elif isinstance(b, streamclone.streamcloneapplier):\\n         requirements = streamclone.readbundle1header(fh)[2]\\n         formatted = bundle2._formatrequirementsparams(requirements)\\n         return 'none-packed1;%s' % formatted\\n     else:\\n         raise error.Abort(_('unknown bundle type: %s') % b)\\n \\n def _computeoutgoing(repo, heads, common):\\n     \\\"\\\"\\\"Computes which revs are outgoing given a set of common\\n     and a set of heads.\\n \\n     This is a separate function so extensions can have access to\\n     the logic.\\n \\n     Returns a discovery.outgoing object.\\n     \\\"\\\"\\\"\\n     cl = repo.changelog\\n     if common:\\n         hasnode = cl.hasnode\\n         common = [n for n in common if hasnode(n)]\\n     else:\\n         common = [nullid]\\n     if not heads:\\n         heads = cl.heads()\\n     return discovery.outgoing(repo, common, heads)\\n \\n def _forcebundle1(op):\\n     \\\"\\\"\\\"return true if a pull\\/push must use bundle1\\n \\n     This function is used to allow testing of the older bundle version\\\"\\\"\\\"\\n     ui = op.repo.ui\\n     # The goal is this config is to allow developer to choose the bundle\\n     # version used during exchanged. 
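An aside on the bundlespec grammar documented for parsebundlespec() above: the string form <compression>-<type>[;<parameter0>[;<parameter1>]] with URI-encoded, ";"-delimited key=value pairs can be sketched standalone. A minimal illustration only; splitspec() is a hypothetical name, not a Mercurial API, and unlike the real parser it skips validation (missing "=", unknown engines, strict mode):

    try:
        from urllib.parse import unquote  # Python 3
    except ImportError:
        from urllib import unquote  # Python 2, the era of this code

    def splitspec(spec):
        # <compression>-<version>[;key=value[;key=value...]]
        compression, _, rest = spec.partition('-')
        version, _, paramstr = rest.partition(';')
        params = {}
        if paramstr:
            for p in paramstr.split(';'):
                key, _, value = p.partition('=')
                params[unquote(key)] = unquote(value)
        return compression, version, params

    # splitspec('gzip-v2') -> ('gzip', 'v2', {})
    # splitspec('none-packed1;requirements=revlogv1')
    #     -> ('none', 'packed1', {'requirements': 'revlogv1'})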
This is especially handy during test.\\n     # Value is a list of bundle version to be picked from, highest version\\n     # should be used.\\n     #\\n     # developer config: devel.legacy.exchange\\n     exchange = ui.configlist('devel', 'legacy.exchange')\\n     forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange\\n     return forcebundle1 or not op.remote.capable('bundle2')\\n \\n class pushoperation(object):\\n     \\\"\\\"\\\"A object that represent a single push operation\\n \\n     Its purpose is to carry push related state and very common operations.\\n \\n     A new pushoperation should be created at the beginning of each push and\\n     discarded afterward.\\n     \\\"\\\"\\\"\\n \\n     def __init__(self, repo, remote, force=False, revs=None, newbranch=False,\\n                  bookmarks=(), pushvars=None):\\n         # repo we push from\\n         self.repo = repo\\n         self.ui = repo.ui\\n         # repo we push to\\n         self.remote = remote\\n         # force option provided\\n         self.force = force\\n         # revs to be pushed (None is \\\"all\\\")\\n         self.revs = revs\\n         # bookmark explicitly pushed\\n         self.bookmarks = bookmarks\\n         # allow push of new branch\\n         self.newbranch = newbranch\\n         # step already performed\\n         # (used to check what steps have been already performed through bundle2)\\n         self.stepsdone = set()\\n         # Integer version of the changegroup push result\\n         # - None means nothing to push\\n         # - 0 means HTTP error\\n         # - 1 means we pushed and remote head count is unchanged *or*\\n         #   we have outgoing changesets but refused to push\\n         # - other values as described by addchangegroup()\\n         self.cgresult = None\\n         # Boolean value for the bookmark push\\n         self.bkresult = None\\n         # discover.outgoing object (contains common and outgoing data)\\n         self.outgoing = None\\n         # all remote topological heads before the push\\n         self.remoteheads = None\\n         # Details of the remote branch pre and post push\\n         #\\n         # mapping: {'branch': ([remoteheads],\\n         #                      [newheads],\\n         #                      [unsyncedheads],\\n         #                      [discardedheads])}\\n         # - branch: the branch name\\n         # - remoteheads: the list of remote heads known locally\\n         #                None if the branch is new\\n         # - newheads: the new remote heads (known locally) with outgoing pushed\\n         # - unsyncedheads: the list of remote heads unknown locally.\\n         # - discardedheads: the list of remote heads made obsolete by the push\\n         self.pushbranchmap = None\\n         # testable as a boolean indicating if any nodes are missing locally.\\n         self.incoming = None\\n         # summary of the remote phase situation\\n         self.remotephases = None\\n         # phases changes that must be pushed along side the changesets\\n         self.outdatedphases = None\\n         # phases changes that must be pushed if changeset push fails\\n         self.fallbackoutdatedphases = None\\n         # outgoing obsmarkers\\n         self.outobsmarkers = set()\\n         # outgoing bookmarks\\n         self.outbookmarks = []\\n         # transaction manager\\n         self.trmanager = None\\n         # map { pushkey partid -\\u003e callback handling failure}\\n         # used to handle exception from 
mandatory pushkey part failure\\n         self.pkfailcb = {}\\n         # an iterable of pushvars or None\\n         self.pushvars = pushvars\\n \\n     @util.propertycache\\n     def futureheads(self):\\n         \\\"\\\"\\\"future remote heads if the changeset push succeeds\\\"\\\"\\\"\\n         return self.outgoing.missingheads\\n \\n     @util.propertycache\\n     def fallbackheads(self):\\n         \\\"\\\"\\\"future remote heads if the changeset push fails\\\"\\\"\\\"\\n         if self.revs is None:\\n             # not target to push, all common are relevant\\n             return self.outgoing.commonheads\\n         unfi = self.repo.unfiltered()\\n         # I want cheads = heads(::missingheads and ::commonheads)\\n         # (missingheads is revs with secret changeset filtered out)\\n         #\\n         # This can be expressed as:\\n         #     cheads = ( (missingheads and ::commonheads)\\n         #              + (commonheads and ::missingheads))\\\"\\n         #              )\\n         #\\n         # while trying to push we already computed the following:\\n         #     common = (::commonheads)\\n         #     missing = ((commonheads::missingheads) - commonheads)\\n         #\\n         # We can pick:\\n         # * missingheads part of common (::commonheads)\\n         common = self.outgoing.common\\n         nm = self.repo.changelog.nodemap\\n         cheads = [node for node in self.revs if nm[node] in common]\\n         # and\\n         # * commonheads parents on missing\\n         revset = unfi.set('%ln and parents(roots(%ln))',\\n                          self.outgoing.commonheads,\\n                          self.outgoing.missing)\\n         cheads.extend(c.node() for c in revset)\\n         return cheads\\n \\n     @property\\n     def commonheads(self):\\n         \\\"\\\"\\\"set of all common heads after changeset bundle push\\\"\\\"\\\"\\n         if self.cgresult:\\n             return self.futureheads\\n         else:\\n             return self.fallbackheads\\n \\n # mapping of message used when pushing bookmark\\n bookmsgmap = {'update': (_(\\\"updating bookmark %s\\\\n\\\"),\\n                          _('updating bookmark %s failed!\\\\n')),\\n               'export': (_(\\\"exporting bookmark %s\\\\n\\\"),\\n                          _('exporting bookmark %s failed!\\\\n')),\\n               'delete': (_(\\\"deleting remote bookmark %s\\\\n\\\"),\\n                          _('deleting remote bookmark %s failed!\\\\n')),\\n               }\\n \\n \\n def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),\\n          opargs=None):\\n     '''Push outgoing changesets (limited by revs) from a local\\n     repository to remote. 
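An aside on pushoperation.commonheads above: it is a two-way selection on the changegroup result. A minimal restatement for illustration, not part of exchange.py:

    # futureheads on a successful changegroup push, fallbackheads
    # otherwise; cgresult None (nothing to push) and 0 (HTTP error)
    # are both falsy, so both select the fallback set.
    def commonheads(cgresult, futureheads, fallbackheads):
        return futureheads if cgresult else fallbackheads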
Return an integer:\\n       - None means nothing to push\\n       - 0 means HTTP error\\n       - 1 means we pushed and remote head count is unchanged *or*\\n         we have outgoing changesets but refused to push\\n       - other values as described by addchangegroup()\\n     '''\\n     if opargs is None:\\n         opargs = {}\\n     pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,\\n                            **pycompat.strkwargs(opargs))\\n     if pushop.remote.local():\\n         missing = (set(pushop.repo.requirements)\\n                    - pushop.remote.local().supported)\\n         if missing:\\n             msg = _(\\\"required features are not\\\"\\n                     \\\" supported in the destination:\\\"\\n                     \\\" %s\\\") % (', '.join(sorted(missing)))\\n             raise error.Abort(msg)\\n \\n     if not pushop.remote.canpush():\\n         raise error.Abort(_(\\\"destination does not support push\\\"))\\n \\n     if not pushop.remote.capable('unbundle'):\\n         raise error.Abort(_('cannot push: destination does not support the '\\n                             'unbundle wire protocol command'))\\n \\n     # get lock as we might write phase data\\n     wlock = lock = None\\n     try:\\n         # bundle2 push may receive a reply bundle touching bookmarks or other\\n         # things requiring the wlock. Take it now to ensure proper ordering.\\n         maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')\\n         if (not _forcebundle1(pushop)) and maypushback:\\n             wlock = pushop.repo.wlock()\\n         lock = pushop.repo.lock()\\n         pushop.trmanager = transactionmanager(pushop.repo,\\n                                               'push-response',\\n                                               pushop.remote.url())\\n     except error.LockUnavailable as err:\\n         # source repo cannot be locked.\\n         # We do not abort the push, but just disable the local phase\\n         # synchronisation.\\n         msg = 'cannot lock source repository: %s\\\\n' % err\\n         pushop.ui.debug(msg)\\n \\n     with wlock or util.nullcontextmanager(), \\\\\\n             lock or util.nullcontextmanager(), \\\\\\n             pushop.trmanager or util.nullcontextmanager():\\n         pushop.repo.checkpush(pushop)\\n         _pushdiscovery(pushop)\\n         if not _forcebundle1(pushop):\\n             _pushbundle2(pushop)\\n         _pushchangeset(pushop)\\n         _pushsyncphase(pushop)\\n         _pushobsolete(pushop)\\n         _pushbookmark(pushop)\\n \\n     if repo.ui.configbool('experimental', 'remotenames'):\\n         logexchange.pullremotenames(repo, remote)\\n \\n     return pushop\\n \\n # list of steps to perform discovery before push\\n pushdiscoveryorder = []\\n \\n # Mapping between step name and function\\n #\\n # This exists to help extensions wrap steps if necessary\\n pushdiscoverymapping = {}\\n \\n def pushdiscovery(stepname):\\n     \\\"\\\"\\\"decorator for function performing discovery before push\\n \\n     The function is added to the step -\\u003e function mapping and appended to the\\n     list of steps.  
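An aside on the step registry used here (and again by b2partsgenerator() further down): it is a name-to-function mapping plus an ordered name list, both filled by a decorator. A generic sketch with made-up step names; registerstep() and runall() are illustrative, not Mercurial APIs:

    steporder = []    # plays the role of pushdiscoveryorder
    stepmapping = {}  # plays the role of pushdiscoverymapping

    def registerstep(name):
        def dec(func):
            assert name not in stepmapping  # each step registers once
            stepmapping[name] = func
            steporder.append(name)  # definition order is execution order
            return func
        return dec

    @registerstep('changeset')
    def _changeset(log):
        log.append('changeset discovery ran')

    @registerstep('bookmarks')
    def _bookmarks(log):
        log.append('bookmark discovery ran')

    def runall(log):
        for name in steporder:
            stepmapping[name](log)

    log = []
    runall(log)  # -> ['changeset discovery ran', 'bookmark discovery ran']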
Beware that decorated function will be added in order (this\\n     may matter).\\n \\n     You can only use this decorator for a new step, if you want to wrap a step\\n     from an extension, change the pushdiscovery dictionary directly.\\\"\\\"\\\"\\n     def dec(func):\\n         assert stepname not in pushdiscoverymapping\\n         pushdiscoverymapping[stepname] = func\\n         pushdiscoveryorder.append(stepname)\\n         return func\\n     return dec\\n \\n def _pushdiscovery(pushop):\\n     \\\"\\\"\\\"Run all discovery steps\\\"\\\"\\\"\\n     for stepname in pushdiscoveryorder:\\n         step = pushdiscoverymapping[stepname]\\n         step(pushop)\\n \\n @pushdiscovery('changeset')\\n def _pushdiscoverychangeset(pushop):\\n     \\\"\\\"\\\"discover the changeset that need to be pushed\\\"\\\"\\\"\\n     fci = discovery.findcommonincoming\\n     if pushop.revs:\\n         commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,\\n                         ancestorsof=pushop.revs)\\n     else:\\n         commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)\\n     common, inc, remoteheads = commoninc\\n     fco = discovery.findcommonoutgoing\\n     outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,\\n                    commoninc=commoninc, force=pushop.force)\\n     pushop.outgoing = outgoing\\n     pushop.remoteheads = remoteheads\\n     pushop.incoming = inc\\n \\n @pushdiscovery('phase')\\n def _pushdiscoveryphase(pushop):\\n     \\\"\\\"\\\"discover the phase that needs to be pushed\\n \\n     (computed for both success and failure case for changesets push)\\\"\\\"\\\"\\n     outgoing = pushop.outgoing\\n     unfi = pushop.repo.unfiltered()\\n     remotephases = listkeys(pushop.remote, 'phases')\\n \\n     if (pushop.ui.configbool('ui', '_usedassubrepo')\\n         and remotephases    # server supports phases\\n         and not pushop.outgoing.missing # no changesets to be pushed\\n         and remotephases.get('publishing', False)):\\n         # When:\\n         # - this is a subrepo push\\n         # - and remote support phase\\n         # - and no changeset are to be pushed\\n         # - and remote is publishing\\n         # We may be in issue 3781 case!\\n         # We drop the possible phase synchronisation done by\\n         # courtesy to publish changesets possibly locally draft\\n         # on the remote.\\n         pushop.outdatedphases = []\\n         pushop.fallbackoutdatedphases = []\\n         return\\n \\n     pushop.remotephases = phases.remotephasessummary(pushop.repo,\\n                                                      pushop.fallbackheads,\\n                                                      remotephases)\\n     droots = pushop.remotephases.draftroots\\n \\n     extracond = ''\\n     if not pushop.remotephases.publishing:\\n         extracond = ' and public()'\\n     revset = 'heads((%%ln::%%ln) %s)' % extracond\\n     # Get the list of all revs draft on remote by public here.\\n     # XXX Beware that revset break if droots is not strictly\\n     # XXX root we may want to ensure it is but it is costly\\n     fallback = list(unfi.set(revset, droots, pushop.fallbackheads))\\n     if not outgoing.missing:\\n         future = fallback\\n     else:\\n         # adds changeset we are going to push as draft\\n         #\\n         # should not be necessary for publishing server, but because of an\\n         # issue fixed in xxxxx we have to do it anyway.\\n         fdroots = list(unfi.set('roots(%ln  + %ln::)',\\n           
             outgoing.missing, droots))\\n         fdroots = [f.node() for f in fdroots]\\n         future = list(unfi.set(revset, fdroots, pushop.futureheads))\\n     pushop.outdatedphases = future\\n     pushop.fallbackoutdatedphases = fallback\\n \\n @pushdiscovery('obsmarker')\\n def _pushdiscoveryobsmarkers(pushop):\\n     if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):\\n         return\\n \\n     if not pushop.repo.obsstore:\\n         return\\n \\n     if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):\\n         return\\n \\n     repo = pushop.repo\\n     # very naive computation, that can be quite expensive on big repo.\\n     # However: evolution is currently slow on them anyway.\\n     nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))\\n     pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)\\n \\n @pushdiscovery('bookmarks')\\n def _pushdiscoverybookmarks(pushop):\\n     ui = pushop.ui\\n     repo = pushop.repo.unfiltered()\\n     remote = pushop.remote\\n     ui.debug(\\\"checking for updated bookmarks\\\\n\\\")\\n     ancestors = ()\\n     if pushop.revs:\\n         revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)\\n         ancestors = repo.changelog.ancestors(revnums, inclusive=True)\\n \\n     remotebookmark = listkeys(remote, 'bookmarks')\\n \\n     explicit = set([repo._bookmarks.expandname(bookmark)\\n                     for bookmark in pushop.bookmarks])\\n \\n     remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)\\n     comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)\\n \\n     def safehex(x):\\n         if x is None:\\n             return x\\n         return hex(x)\\n \\n     def hexifycompbookmarks(bookmarks):\\n         return [(b, safehex(scid), safehex(dcid))\\n                 for (b, scid, dcid) in bookmarks]\\n \\n     comp = [hexifycompbookmarks(marks) for marks in comp]\\n     return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)\\n \\n def _processcompared(pushop, pushed, explicit, remotebms, comp):\\n     \\\"\\\"\\\"take decision on bookmark to pull from the remote bookmark\\n \\n     Exist to help extensions who want to alter this behavior.\\n     \\\"\\\"\\\"\\n     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp\\n \\n     repo = pushop.repo\\n \\n     for b, scid, dcid in advsrc:\\n         if b in explicit:\\n             explicit.remove(b)\\n         if not pushed or repo[scid].rev() in pushed:\\n             pushop.outbookmarks.append((b, dcid, scid))\\n     # search added bookmark\\n     for b, scid, dcid in addsrc:\\n         if b in explicit:\\n             explicit.remove(b)\\n             pushop.outbookmarks.append((b, '', scid))\\n     # search for overwritten bookmark\\n     for b, scid, dcid in list(advdst) + list(diverge) + list(differ):\\n         if b in explicit:\\n             explicit.remove(b)\\n             pushop.outbookmarks.append((b, dcid, scid))\\n     # search for bookmark to delete\\n     for b, scid, dcid in adddst:\\n         if b in explicit:\\n             explicit.remove(b)\\n             # treat as \\\"deleted locally\\\"\\n             pushop.outbookmarks.append((b, dcid, ''))\\n     # identical bookmarks shouldn't get reported\\n     for b, scid, dcid in same:\\n         if b in explicit:\\n             explicit.remove(b)\\n \\n     if explicit:\\n         explicit = sorted(explicit)\\n         # we should probably list all of them\\n         pushop.ui.warn(_('bookmark %s does 
not exist on the local '\\n                          'or remote repository!\\\\n') % explicit[0])\\n         pushop.bkresult = 2\\n \\n     pushop.outbookmarks.sort()\\n \\n def _pushcheckoutgoing(pushop):\\n     outgoing = pushop.outgoing\\n     unfi = pushop.repo.unfiltered()\\n     if not outgoing.missing:\\n         # nothing to push\\n         scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)\\n         return False\\n     # something to push\\n     if not pushop.force:\\n         # if repo.obsstore == False --\\u003e no obsolete\\n         # then, save the iteration\\n         if unfi.obsstore:\\n             # this message are here for 80 char limit reason\\n             mso = _(\\\"push includes obsolete changeset: %s!\\\")\\n             mspd = _(\\\"push includes phase-divergent changeset: %s!\\\")\\n             mscd = _(\\\"push includes content-divergent changeset: %s!\\\")\\n             mst = {\\\"orphan\\\": _(\\\"push includes orphan changeset: %s!\\\"),\\n                    \\\"phase-divergent\\\": mspd,\\n                    \\\"content-divergent\\\": mscd}\\n             # If we are to push if there is at least one\\n             # obsolete or unstable changeset in missing, at\\n             # least one of the missinghead will be obsolete or\\n             # unstable. So checking heads only is ok\\n             for node in outgoing.missingheads:\\n                 ctx = unfi[node]\\n                 if ctx.obsolete():\\n                     raise error.Abort(mso % ctx)\\n                 elif ctx.isunstable():\\n                     # TODO print more than one instability in the abort\\n                     # message\\n                     raise error.Abort(mst[ctx.instabilities()[0]] % ctx)\\n \\n         discovery.checkheads(pushop)\\n     return True\\n \\n # List of names of steps to perform for an outgoing bundle2, order matters.\\n b2partsgenorder = []\\n \\n # Mapping between step name and function\\n #\\n # This exists to help extensions wrap steps if necessary\\n b2partsgenmapping = {}\\n \\n def b2partsgenerator(stepname, idx=None):\\n     \\\"\\\"\\\"decorator for function generating bundle2 part\\n \\n     The function is added to the step -\\u003e function mapping and appended to the\\n     list of steps.  
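An aside on the idx parameter above, which the discovery registry lacks: it lets a part generator be spliced into the run order instead of appended. Step names below are illustrative only:

    order = ['check-bookmarks', 'check-phases', 'changeset']

    def register(stepname, idx=None):
        if idx is None:
            order.append(stepname)       # default: run after existing steps
        else:
            order.insert(idx, stepname)  # splice at a chosen position

    register('obsmarkers')      # appended last
    register('check-heads', 0)  # forced to run first
    # order == ['check-heads', 'check-bookmarks', 'check-phases',
    #           'changeset', 'obsmarkers']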
Beware that decorated functions will be added in order\\n     (this may matter).\\n \\n     You can only use this decorator for new steps, if you want to wrap a step\\n     from an extension, attack the b2partsgenmapping dictionary directly.\\\"\\\"\\\"\\n     def dec(func):\\n         assert stepname not in b2partsgenmapping\\n         b2partsgenmapping[stepname] = func\\n         if idx is None:\\n             b2partsgenorder.append(stepname)\\n         else:\\n             b2partsgenorder.insert(idx, stepname)\\n         return func\\n     return dec\\n \\n def _pushb2ctxcheckheads(pushop, bundler):\\n     \\\"\\\"\\\"Generate race condition checking parts\\n \\n     Exists as an independent function to aid extensions\\n     \\\"\\\"\\\"\\n     # * 'force' do not check for push race,\\n     # * if we don't push anything, there are nothing to check.\\n     if not pushop.force and pushop.outgoing.missingheads:\\n         allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())\\n         emptyremote = pushop.pushbranchmap is None\\n         if not allowunrelated or emptyremote:\\n             bundler.newpart('check:heads', data=iter(pushop.remoteheads))\\n         else:\\n             affected = set()\\n             for branch, heads in pushop.pushbranchmap.iteritems():\\n                 remoteheads, newheads, unsyncedheads, discardedheads = heads\\n                 if remoteheads is not None:\\n                     remote = set(remoteheads)\\n                     affected |= set(discardedheads) & remote\\n                     affected |= remote - set(newheads)\\n             if affected:\\n                 data = iter(sorted(affected))\\n                 bundler.newpart('check:updated-heads', data=data)\\n \\n def _pushing(pushop):\\n     \\\"\\\"\\\"return True if we are pushing anything\\\"\\\"\\\"\\n     return bool(pushop.outgoing.missing\\n                 or pushop.outdatedphases\\n                 or pushop.outobsmarkers\\n                 or pushop.outbookmarks)\\n \\n @b2partsgenerator('check-bookmarks')\\n def _pushb2checkbookmarks(pushop, bundler):\\n     \\\"\\\"\\\"insert bookmark move checking\\\"\\\"\\\"\\n     if not _pushing(pushop) or pushop.force:\\n         return\\n     b2caps = bundle2.bundle2caps(pushop.remote)\\n     hasbookmarkcheck = 'bookmarks' in b2caps\\n     if not (pushop.outbookmarks and hasbookmarkcheck):\\n         return\\n     data = []\\n     for book, old, new in pushop.outbookmarks:\\n         old = bin(old)\\n         data.append((book, old))\\n     checkdata = bookmod.binaryencode(data)\\n     bundler.newpart('check:bookmarks', data=checkdata)\\n \\n @b2partsgenerator('check-phases')\\n def _pushb2checkphases(pushop, bundler):\\n     \\\"\\\"\\\"insert phase move checking\\\"\\\"\\\"\\n     if not _pushing(pushop) or pushop.force:\\n         return\\n     b2caps = bundle2.bundle2caps(pushop.remote)\\n     hasphaseheads = 'heads' in b2caps.get('phases', ())\\n     if pushop.remotephases is not None and hasphaseheads:\\n         # check that the remote phase has not changed\\n         checks = [[] for p in phases.allphases]\\n         checks[phases.public].extend(pushop.remotephases.publicheads)\\n         checks[phases.draft].extend(pushop.remotephases.draftroots)\\n         if any(checks):\\n             for nodes in checks:\\n                 nodes.sort()\\n             checkdata = phases.binaryencode(checks)\\n             bundler.newpart('check:phases', data=checkdata)\\n \\n @b2partsgenerator('changeset')\\n def 
 @b2partsgenerator('changeset')
 def _pushb2ctx(pushop, bundler):
     """handle changegroup push through bundle2
 
     addchangegroup result is stored in the ``pushop.cgresult`` attribute.
     """
     if 'changesets' in pushop.stepsdone:
         return
     pushop.stepsdone.add('changesets')
     # Send known heads to the server for race detection.
     if not _pushcheckoutgoing(pushop):
         return
     pushop.repo.prepushoutgoinghooks(pushop)
 
     _pushb2ctxcheckheads(pushop, bundler)
 
     b2caps = bundle2.bundle2caps(pushop.remote)
     version = '01'
     cgversions = b2caps.get('changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(
                           pushop.repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = max(cgversions)
     cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                       'push')
     cgpart = bundler.newpart('changegroup', data=cgstream)
     if cgversions:
         cgpart.addparam('version', version)
     if 'treemanifest' in pushop.repo.requirements:
         cgpart.addparam('treemanifest', '1')
     def handlereply(op):
         """extract addchangegroup returns from server reply"""
         cgreplies = op.records.getreplies(cgpart.id)
         assert len(cgreplies['changegroup']) == 1
         pushop.cgresult = cgreplies['changegroup'][0]['return']
     return handlereply
 
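# A worked example of the changegroup version negotiation above, with
# hypothetical capability values: the highest version supported by both
# sides wins.
cgversions = ['01', '02']          # hypothetical server-advertised versions
supported = {'01', '02', '03'}     # hypothetical local support
assert max(v for v in cgversions if v in supported) == '02'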
 @b2partsgenerator('phase')
 def _pushb2phases(pushop, bundler):
     """handle phase push through bundle2"""
     if 'phases' in pushop.stepsdone:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
     ui = pushop.repo.ui
 
     legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
     haspushkey = 'pushkey' in b2caps
     hasphaseheads = 'heads' in b2caps.get('phases', ())
 
     if hasphaseheads and not legacyphase:
         return _pushb2phaseheads(pushop, bundler)
     elif haspushkey:
         return _pushb2phasespushkey(pushop, bundler)
 
 def _pushb2phaseheads(pushop, bundler):
     """push phase information through a bundle2 - binary part"""
     pushop.stepsdone.add('phases')
     if pushop.outdatedphases:
         updates = [[] for p in phases.allphases]
         updates[0].extend(h.node() for h in pushop.outdatedphases)
         phasedata = phases.binaryencode(updates)
         bundler.newpart('phase-heads', data=phasedata)
 
 def _pushb2phasespushkey(pushop, bundler):
     """push phase information through a bundle2 - pushkey part"""
     pushop.stepsdone.add('phases')
     part2node = []
 
     def handlefailure(pushop, exc):
         targetid = int(exc.partid)
         for partid, node in part2node:
             if partid == targetid:
                 raise error.Abort(_('updating %s to public failed') % node)
 
     enc = pushkey.encode
     for newremotehead in pushop.outdatedphases:
         part = bundler.newpart('pushkey')
         part.addparam('namespace', enc('phases'))
         part.addparam('key', enc(newremotehead.hex()))
         part.addparam('old', enc('%d' % phases.draft))
         part.addparam('new', enc('%d' % phases.public))
         part2node.append((part.id, newremotehead))
         pushop.pkfailcb[part.id] = handlefailure
 
     def handlereply(op):
         for partid, node in part2node:
             partrep = op.records.getreplies(partid)
             results = partrep['pushkey']
             assert len(results) <= 1
             msg = None
             if not results:
                 msg = _('server ignored update of %s to public!\n') % node
             elif not int(results[0]['return']):
                 msg = _('updating %s to public failed!\n') % node
             if msg is not None:
                 pushop.ui.warn(msg)
     return handlereply
 
 @b2partsgenerator('obsmarkers')
 def _pushb2obsmarkers(pushop, bundler):
     if 'obsmarkers' in pushop.stepsdone:
         return
     remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
     if obsolete.commonversion(remoteversions) is None:
         return
     pushop.stepsdone.add('obsmarkers')
     if pushop.outobsmarkers:
         markers = sorted(pushop.outobsmarkers)
         bundle2.buildobsmarkerspart(bundler, markers)
 
 @b2partsgenerator('bookmarks')
 def _pushb2bookmarks(pushop, bundler):
     """handle bookmark push through bundle2"""
     if 'bookmarks' in pushop.stepsdone:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
 
     legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
     legacybooks = 'bookmarks' in legacy
 
     if not legacybooks and 'bookmarks' in b2caps:
         return _pushb2bookmarkspart(pushop, bundler)
     elif 'pushkey' in b2caps:
         return _pushb2bookmarkspushkey(pushop, bundler)
 
 def _bmaction(old, new):
     """small utility for bookmark pushing"""
     if not old:
         return 'export'
     elif not new:
         return 'delete'
     return 'update'
 
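# The mapping implemented by _bmaction above, spelled out (the hex values
# are placeholders):
assert _bmaction('', 'cafebabe') == 'export'          # no old value: new bookmark
assert _bmaction('cafebabe', '') == 'delete'          # no new value: removal
assert _bmaction('cafebabe', 'deadbeef') == 'update'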
 def _pushb2bookmarkspart(pushop, bundler):
     pushop.stepsdone.add('bookmarks')
     if not pushop.outbookmarks:
         return
 
     allactions = []
     data = []
     for book, old, new in pushop.outbookmarks:
         new = bin(new)
         data.append((book, new))
         allactions.append((book, _bmaction(old, new)))
     checkdata = bookmod.binaryencode(data)
     bundler.newpart('bookmarks', data=checkdata)
 
     def handlereply(op):
         ui = pushop.ui
         # if success
         for book, action in allactions:
             ui.status(bookmsgmap[action][0] % book)
 
     return handlereply
 
 def _pushb2bookmarkspushkey(pushop, bundler):
     pushop.stepsdone.add('bookmarks')
     part2book = []
     enc = pushkey.encode
 
     def handlefailure(pushop, exc):
         targetid = int(exc.partid)
         for partid, book, action in part2book:
             if partid == targetid:
                 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
         # we should not be called for a part we did not generate
         assert False
 
     for book, old, new in pushop.outbookmarks:
         part = bundler.newpart('pushkey')
         part.addparam('namespace', enc('bookmarks'))
         part.addparam('key', enc(book))
         part.addparam('old', enc(old))
         part.addparam('new', enc(new))
         action = 'update'
         if not old:
             action = 'export'
         elif not new:
             action = 'delete'
         part2book.append((part.id, book, action))
         pushop.pkfailcb[part.id] = handlefailure
 
     def handlereply(op):
         ui = pushop.ui
         for partid, book, action in part2book:
             partrep = op.records.getreplies(partid)
             results = partrep['pushkey']
             assert len(results) <= 1
             if not results:
                 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
             else:
                 ret = int(results[0]['return'])
                 if ret:
                     ui.status(bookmsgmap[action][0] % book)
                 else:
                     ui.warn(bookmsgmap[action][1] % book)
                     if pushop.bkresult is not None:
                         pushop.bkresult = 1
     return handlereply
 
 @b2partsgenerator('pushvars', idx=0)
 def _getbundlesendvars(pushop, bundler):
     '''send shellvars via bundle2'''
     pushvars = pushop.pushvars
     if pushvars:
         shellvars = {}
         for raw in pushvars:
             if '=' not in raw:
                 msg = ("unable to parse variable '%s', should follow "
                        "'KEY=VALUE' or 'KEY=' format")
                 raise error.Abort(msg % raw)
             k, v = raw.split('=', 1)
             shellvars[k] = v
 
         part = bundler.newpart('pushvars')
 
         for key, value in shellvars.iteritems():
             part.addparam(key, value, mandatory=False)
 
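# A minimal sketch of the input _getbundlesendvars accepts: each entry of
# pushop.pushvars must look like 'KEY=VALUE' or 'KEY=' (this is what the
# experimental `hg push --pushvars` option feeds in; the variable names here
# are hypothetical):
pushvars = ['DEBUG=1', 'REASON=']
shellvars = dict(raw.split('=', 1) for raw in pushvars)
assert shellvars == {'DEBUG': '1', 'REASON': ''}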
 def _pushbundle2(pushop):
     """push data to the remote using bundle2
 
     The only currently supported type of data is changegroup but this will
     evolve in the future."""
     bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
     pushback = (pushop.trmanager
                 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
 
     # create reply capability
     capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                       allowpushback=pushback,
                                                       role='client'))
     bundler.newpart('replycaps', data=capsblob)
     replyhandlers = []
     for partgenname in b2partsgenorder:
         partgen = b2partsgenmapping[partgenname]
         ret = partgen(pushop, bundler)
         if callable(ret):
             replyhandlers.append(ret)
     # do not push if nothing to push
     if bundler.nbparts <= 1:
         return
     stream = util.chunkbuffer(bundler.getchunks())
     try:
         try:
             with pushop.remote.commandexecutor() as e:
                 reply = e.callcommand('unbundle', {
                     'bundle': stream,
                     'heads': ['force'],
                     'url': pushop.remote.url(),
                 }).result()
         except error.BundleValueError as exc:
             raise error.Abort(_('missing support for %s') % exc)
         try:
             trgetter = None
             if pushback:
                 trgetter = pushop.trmanager.transaction
             op = bundle2.processbundle(pushop.repo, reply, trgetter)
         except error.BundleValueError as exc:
             raise error.Abort(_('missing support for %s') % exc)
         except bundle2.AbortFromPart as exc:
             pushop.ui.status(_('remote: %s\n') % exc)
             if exc.hint is not None:
                 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
             raise error.Abort(_('push failed on remote'))
     except error.PushkeyFailed as exc:
         partid = int(exc.partid)
         if partid not in pushop.pkfailcb:
             raise
         pushop.pkfailcb[partid](pushop, exc)
     for rephand in replyhandlers:
         rephand(op)
 
 def _pushchangeset(pushop):
     """Make the actual push of changeset bundle to remote repo"""
     if 'changesets' in pushop.stepsdone:
         return
     pushop.stepsdone.add('changesets')
     if not _pushcheckoutgoing(pushop):
         return
 
     # Should have verified this in push().
     assert pushop.remote.capable('unbundle')
 
     pushop.repo.prepushoutgoinghooks(pushop)
     outgoing = pushop.outgoing
     # TODO: get bundlecaps from remote
     bundlecaps = None
     # create a changegroup from local
     if pushop.revs is None and not (outgoing.excluded
                             or pushop.repo.changelog.filteredrevs):
         # push everything,
         # use the fast path, no race possible on push
         cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                 fastpath=True, bundlecaps=bundlecaps)
     else:
         cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)
 
     # apply changegroup to remote
     # local repo finds heads on server, finds out what
     # revs it must push. once revs transferred, if server
     # finds it has different heads (someone else won
     # commit/push race), server aborts.
     if pushop.force:
         remoteheads = ['force']
     else:
         remoteheads = pushop.remoteheads
     # ssh: return remote's addchangegroup()
     # http: return remote's addchangegroup() or 0 for error
     pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                         pushop.repo.url())
 
 def _pushsyncphase(pushop):
     """synchronise phase information locally and remotely"""
     cheads = pushop.commonheads
     # even when we don't push, exchanging phase data is useful
     remotephases = listkeys(pushop.remote, 'phases')
     if (pushop.ui.configbool('ui', '_usedassubrepo')
         and remotephases    # server supports phases
         and pushop.cgresult is None # nothing was pushed
         and remotephases.get('publishing', False)):
         # When:
         # - this is a subrepo push
         # - and remote supports phases
         # - and no changeset was pushed
         # - and remote is publishing
         # We may be in issue 3871 case!
         # We drop the possible phase synchronisation done by
         # courtesy to publish changesets possibly locally draft
         # on the remote.
         remotephases = {'publishing': 'True'}
     if not remotephases: # old server or public only reply from non-publishing
         _localphasemove(pushop, cheads)
         # don't push any phase data as there is nothing to push
     else:
         ana = phases.analyzeremotephases(pushop.repo, cheads,
                                          remotephases)
         pheads, droots = ana
         ### Apply remote phase on local
         if remotephases.get('publishing', False):
             _localphasemove(pushop, cheads)
         else: # publish = False
             _localphasemove(pushop, pheads)
             _localphasemove(pushop, cheads, phases.draft)
         ### Apply local phase on remote
 
         if pushop.cgresult:
             if 'phases' in pushop.stepsdone:
                 # phases already pushed through bundle2
                 return
             outdated = pushop.outdatedphases
         else:
             outdated = pushop.fallbackoutdatedphases
 
         pushop.stepsdone.add('phases')
 
         # filter heads already turned public by the push
         outdated = [c for c in outdated if c.node() not in pheads]
         # fallback to independent pushkey command
         for newremotehead in outdated:
             with pushop.remote.commandexecutor() as e:
                 r = e.callcommand('pushkey', {
                     'namespace': 'phases',
                     'key': newremotehead.hex(),
                     'old': '%d' % phases.draft,
                     'new': '%d' % phases.public
                 }).result()
 
             if not r:
                 pushop.ui.warn(_('updating %s to public failed!\n')
                                % newremotehead)
 
 def _localphasemove(pushop, nodes, phase=phases.public):
     """move <nodes> to <phase> in the local source repo"""
     if pushop.trmanager:
         phases.advanceboundary(pushop.repo,
                                pushop.trmanager.transaction(),
                                phase,
                                nodes)
     else:
         # repo is not locked, do not change any phases!
         # Informs the user that phases should have been moved when
         # applicable.
         actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
         phasestr = phases.phasenames[phase]
         if actualmoves:
             pushop.ui.status(_('cannot lock source repo, skipping '
                                'local %s phase update\n') % phasestr)
 
 def _pushobsolete(pushop):
     """utility function to push obsolete markers to a remote"""
     if 'obsmarkers' in pushop.stepsdone:
         return
     repo = pushop.repo
     remote = pushop.remote
     pushop.stepsdone.add('obsmarkers')
     if pushop.outobsmarkers:
         pushop.ui.debug('try to push obsolete markers to remote\n')
         rslts = []
         remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
         for key in sorted(remotedata, reverse=True):
             # reverse sort to ensure we end with dump0
             data = remotedata[key]
             rslts.append(remote.pushkey('obsolete', key, '', data))
         if [r for r in rslts if not r]:
             msg = _('failed to push some obsolete markers!\n')
             repo.ui.warn(msg)
 
 def _pushbookmark(pushop):
     """Update bookmark position on remote"""
     if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
         return
     pushop.stepsdone.add('bookmarks')
     ui = pushop.ui
     remote = pushop.remote
 
     for b, old, new in pushop.outbookmarks:
         action = 'update'
         if not old:
             action = 'export'
         elif not new:
             action = 'delete'
 
         with remote.commandexecutor() as e:
             r = e.callcommand('pushkey', {
                 'namespace': 'bookmarks',
                 'key': b,
                 'old': old,
                 'new': new,
             }).result()
 
         if r:
             ui.status(bookmsgmap[action][0] % b)
         else:
             ui.warn(bookmsgmap[action][1] % b)
             # discovery can have set the value from an invalid entry
             if pushop.bkresult is not None:
                 pushop.bkresult = 1
 
 class pulloperation(object):
     """An object that represents a single pull operation
 
     Its purpose is to carry pull-related state and very common operations.
 
     A new one should be created at the beginning of each pull and discarded
     afterward.
     """
 
     def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                  remotebookmarks=None, streamclonerequested=None,
                  includepats=None, excludepats=None):
         # repo we pull into
         self.repo = repo
         # repo we pull from
         self.remote = remote
         # revisions we try to pull (None is "all")
         self.heads = heads
         # bookmarks pulled explicitly
         self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                   for bookmark in bookmarks]
         # do we force pull?
         self.force = force
         # whether a streaming clone was requested
         self.streamclonerequested = streamclonerequested
         # transaction manager
         self.trmanager = None
         # set of common changesets between local and remote before pull
         self.common = None
         # set of pulled heads
         self.rheads = None
         # list of missing changesets to fetch remotely
         self.fetch = None
         # remote bookmarks data
         self.remotebookmarks = remotebookmarks
         # result of changegroup pulling (used as return code by pull)
         self.cgresult = None
         # list of steps already done
         self.stepsdone = set()
         # Whether we attempted a clone from pre-generated bundles.
         self.clonebundleattempted = False
         # Set of file patterns to include.
         self.includepats = includepats
         # Set of file patterns to exclude.
         self.excludepats = excludepats
 
     @util.propertycache
     def pulledsubset(self):
         """heads of the set of changesets targeted by the pull"""
         # compute target subset
         if self.heads is None:
             # We pulled everything possible
             # sync on everything common
             c = set(self.common)
             ret = list(self.common)
             for n in self.rheads:
                 if n not in c:
                     ret.append(n)
             return ret
         else:
             # We pulled a specific subset
             # sync on this subset
             return self.heads
 
     @util.propertycache
     def canusebundle2(self):
         return not _forcebundle1(self)
 
     @util.propertycache
     def remotebundle2caps(self):
         return bundle2.bundle2caps(self.remote)
 
     def gettransaction(self):
         # deprecated; talk to trmanager directly
         return self.trmanager.transaction()
 
 class transactionmanager(util.transactional):
     """An object to manage the life cycle of a transaction
 
     It creates the transaction on demand and calls the appropriate hooks when
     closing the transaction."""
     def __init__(self, repo, source, url):
         self.repo = repo
         self.source = source
         self.url = url
         self._tr = None
 
     def transaction(self):
         """Return an open transaction object, constructing if necessary"""
         if not self._tr:
             trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
             self._tr = self.repo.transaction(trname)
             self._tr.hookargs['source'] = self.source
             self._tr.hookargs['url'] = self.url
         return self._tr
 
     def close(self):
         """close transaction if created"""
         if self._tr is not None:
             self._tr.close()
 
     def release(self):
         """release transaction if created"""
         if self._tr is not None:
             self._tr.release()
 
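# A minimal sketch of the transactionmanager life cycle as used by pull()
# further below (`repo` and `remote` are assumed to exist):
trmanager = transactionmanager(repo, 'pull', remote.url())
with trmanager:                   # util.transactional context manager
    tr = trmanager.transaction()  # the transaction is created on first use
    # ... apply pulled data under tr ...
# on a clean exit close() commits; release() then aborts the transaction
# if it was never closed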
 def listkeys(remote, namespace):
     with remote.commandexecutor() as e:
         return e.callcommand('listkeys', {'namespace': namespace}).result()
 
 def _fullpullbundle2(repo, pullop):
     # The server may send a partial reply, i.e. when inlining
     # pre-computed bundles. In that case, update the common
     # set based on the results and pull another bundle.
     #
     # There are two indicators that the process is finished:
     # - no changeset has been added, or
     # - all remote heads are known locally.
     # The head check must use the unfiltered view as obsoletion
     # markers can hide heads.
     unfi = repo.unfiltered()
     unficl = unfi.changelog
     def headsofdiff(h1, h2):
         """Returns heads(h1 % h2)"""
         res = unfi.set('heads(%ln %% %ln)', h1, h2)
         return set(ctx.node() for ctx in res)
     def headsofunion(h1, h2):
         """Returns heads((h1 + h2) - null)"""
         res = unfi.set('heads((%ln + %ln - null))', h1, h2)
         return set(ctx.node() for ctx in res)
     while True:
         old_heads = unficl.heads()
         clstart = len(unficl)
         _pullbundle2(pullop)
         if repository.NARROW_REQUIREMENT in repo.requirements:
             # XXX narrow clones filter the heads on the server side during
             # XXX getbundle and result in partial replies as well.
             # XXX Disable pull bundles in this case as band aid to avoid
             # XXX extra round trips.
             break
         if clstart == len(unficl):
             break
         if all(unficl.hasnode(n) for n in pullop.rheads):
             break
         new_heads = headsofdiff(unficl.heads(), old_heads)
         pullop.common = headsofunion(new_heads, pullop.common)
         pullop.rheads = set(pullop.rheads) - pullop.common
 
 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
          streamclonerequested=None, includepats=None, excludepats=None):
     """Fetch repository data from a remote.
 
     This is the main function used to retrieve data from a remote repository.
 
     ``repo`` is the local repository to clone into.
     ``remote`` is a peer instance.
     ``heads`` is an iterable of revisions we want to pull. ``None`` (the
     default) means to pull everything from the remote.
     ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
     default, all remote bookmarks are pulled.
     ``opargs`` are additional keyword arguments to pass to ``pulloperation``
     initialization.
     ``streamclonerequested`` is a boolean indicating whether a "streaming
     clone" is requested. A "streaming clone" is essentially a raw file copy
     of revlogs from the server. This only works when the local repository is
     empty. The default value of ``None`` means to respect the server
     configuration for preferring stream clones.
     ``includepats`` and ``excludepats`` define explicit file patterns to
     include and exclude in storage, respectively. If not defined, narrow
     patterns from the repo instance are used, if available.
 
     Returns the ``pulloperation`` created for this pull.
     """
     if opargs is None:
         opargs = {}
 
     # We allow the narrow patterns to be passed in explicitly to provide more
     # flexibility for API consumers.
     if includepats or excludepats:
         includepats = includepats or set()
         excludepats = excludepats or set()
     else:
         includepats, excludepats = repo.narrowpats
 
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)
 
     pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                            streamclonerequested=streamclonerequested,
                            includepats=includepats, excludepats=excludepats,
                            **pycompat.strkwargs(opargs))
 
     peerlocal = pullop.remote.local()
     if peerlocal:
         missing = set(peerlocal.requirements) - pullop.repo.supported
         if missing:
             msg = _("required features are not"
                     " supported in the destination:"
                     " %s") % (', '.join(sorted(missing)))
             raise error.Abort(msg)
 
     pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
     with repo.wlock(), repo.lock(), pullop.trmanager:
-        # This should ideally be in _pullbundle2(). However, it needs to run
-        # before discovery to avoid extra work.
-        _maybeapplyclonebundle(pullop)
-        streamclone.maybeperformlegacystreamclone(pullop)
-        _pulldiscovery(pullop)
-        if pullop.canusebundle2:
-            _fullpullbundle2(repo, pullop)
-        _pullchangeset(pullop)
-        _pullphase(pullop)
-        _pullbookmarks(pullop)
-        _pullobsolete(pullop)
+        # Use the modern wire protocol, if available.
+        if remote.capable('exchangev2'):
+            exchangev2.pull(pullop)
+        else:
+            # This should ideally be in _pullbundle2(). However, it needs to run
+            # before discovery to avoid extra work.
+            _maybeapplyclonebundle(pullop)
+            streamclone.maybeperformlegacystreamclone(pullop)
+            _pulldiscovery(pullop)
+            if pullop.canusebundle2:
+                _fullpullbundle2(repo, pullop)
+            _pullchangeset(pullop)
+            _pullphase(pullop)
+            _pullbookmarks(pullop)
+            _pullobsolete(pullop)
 
     # storing remotenames
     if repo.ui.configbool('experimental', 'remotenames'):
         logexchange.pullremotenames(repo, remote)
 
     return pullop
 
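# A minimal usage sketch for pull() above (`repo` is assumed to be a local
# repository object and `peer` a peer instance, e.g. from hg.peer()):
pullop = pull(repo, peer)    # pull everything with default options
if pullop.cgresult:          # the changegroup result doubles as a return code
    repo.ui.status('changesets were pulled\n')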
 # list of steps to perform discovery before pull
 pulldiscoveryorder = []
 
 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 pulldiscoverymapping = {}
 
 def pulldiscovery(stepname):
     """decorator for function performing discovery before pull
 
     The function is added to the step -> function mapping and appended to the
     list of steps.  Beware that decorated functions will be added in order
     (this may matter).
 
     You can only use this decorator for a new step; if you want to wrap a step
     from an extension, change the pulldiscovery dictionary directly."""
     def dec(func):
         assert stepname not in pulldiscoverymapping
         pulldiscoverymapping[stepname] = func
         pulldiscoveryorder.append(stepname)
         return func
     return dec
 
 def _pulldiscovery(pullop):
     """Run all discovery steps"""
     for stepname in pulldiscoveryorder:
         step = pulldiscoverymapping[stepname]
         step(pullop)
 
 @pulldiscovery('b1:bookmarks')
 def _pullbookmarkbundle1(pullop):
     """fetch bookmark data in bundle1 case
 
     If not using bundle2, we have to fetch bookmarks before changeset
     discovery to reduce the chance and impact of race conditions."""
     if pullop.remotebookmarks is not None:
         return
     if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
         # all known bundle2 servers now support listkeys, but let's be nice
         # with new implementations.
         return
     books = listkeys(pullop.remote, 'bookmarks')
     pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
 
 
 @pulldiscovery('changegroup')
 def _pulldiscoverychangegroup(pullop):
     """discovery phase for the pull
 
     Currently handles changeset discovery only; will change to handle all
     discovery at some point."""
     tmp = discovery.findcommonincoming(pullop.repo,
                                        pullop.remote,
                                        heads=pullop.heads,
                                        force=pullop.force)
     common, fetch, rheads = tmp
     nm = pullop.repo.unfiltered().changelog.nodemap
     if fetch and rheads:
         # If a remote head is filtered locally, put it back in common.
         #
         # This is a hackish solution to catch most of the "common but locally
         # hidden" situations.  We do not perform discovery on the unfiltered
         # repository because it ends up doing a pathological number of round
         # trips for a huge number of changesets we do not care about.
         #
         # If a set of such "common but filtered" changesets exists on the
         # server but does not include a remote head, we'll not be able to
         # detect it.
         scommon = set(common)
         for n in rheads:
             if n in nm:
                 if n not in scommon:
                     common.append(n)
         if set(rheads).issubset(set(common)):
             fetch = []
     pullop.common = common
     pullop.fetch = fetch
     pullop.rheads = rheads
 
 def _pullbundle2(pullop):
     """pull data using bundle2
 
     For now, the only supported type of data is the changegroup."""
     kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
 
     # make ui easier to access
     ui = pullop.repo.ui
 
     # At the moment we don't do stream clones over bundle2. If that is
     # implemented then here's where the check for that will go.
     streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
 
     # declare pull perimeters
     kwargs['common'] = pullop.common
     kwargs['heads'] = pullop.heads or pullop.rheads
 
     if streaming:
         kwargs['cg'] = False
         kwargs['stream'] = True
         pullop.stepsdone.add('changegroup')
         pullop.stepsdone.add('phases')
 
     else:
         # pulling changegroup
         pullop.stepsdone.add('changegroup')
 
         kwargs['cg'] = pullop.fetch
 
         legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
         hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
         if (not legacyphase and hasbinaryphase):
             kwargs['phases'] = True
             pullop.stepsdone.add('phases')
 
         if 'listkeys' in pullop.remotebundle2caps:
             if 'phases' not in pullop.stepsdone:
                 kwargs['listkeys'] = ['phases']
 
     bookmarksrequested = False
     legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
     hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
 
     if pullop.remotebookmarks is not None:
         pullop.stepsdone.add('request-bookmarks')
 
     if ('request-bookmarks' not in pullop.stepsdone
         and pullop.remotebookmarks is None
         and not legacybookmark and hasbinarybook):
         kwargs['bookmarks'] = True
         bookmarksrequested = True
 
     if 'listkeys' in pullop.remotebundle2caps:
         if 'request-bookmarks' not in pullop.stepsdone:
             # make sure to always include bookmark data when migrating
             # `hg incoming --bundle` to using this function.
             pullop.stepsdone.add('request-bookmarks')
             kwargs.setdefault('listkeys', []).append('bookmarks')
 
     # If this is a full pull / clone and the server supports the clone bundles
     # feature, tell the server whether we attempted a clone bundle. The
     # presence of this flag indicates the client supports clone bundles. This
     # will enable the server to treat clients that support clone bundles
     # differently from those that don't.
     if (pullop.remote.capable('clonebundles')
         and pullop.heads is None and list(pullop.common) == [nullid]):
         kwargs['cbattempted'] = pullop.clonebundleattempted
 
     if streaming:
         pullop.repo.ui.status(_('streaming all changes\n'))
     elif not pullop.fetch:
         pullop.repo.ui.status(_("no changes found\n"))
         pullop.cgresult = 0
     else:
         if pullop.heads is None and list(pullop.common) == [nullid]:
             pullop.repo.ui.status(_("requesting all changes\n"))
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
         remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
         if obsolete.commonversion(remoteversions) is not None:
             kwargs['obsmarkers'] = True
             pullop.stepsdone.add('obsmarkers')
     _pullbundle2extraprepare(pullop, kwargs)
 
     with pullop.remote.commandexecutor() as e:
         args = dict(kwargs)
         args['source'] = 'pull'
         bundle = e.callcommand('getbundle', args).result()
 
         try:
             op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                          source='pull')
             op.modes['bookmarks'] = 'records'
             bundle2.processbundle(pullop.repo, bundle, op=op)
         except bundle2.AbortFromPart as exc:
             pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
             raise error.Abort(_('pull failed on remote'), hint=exc.hint)
         except error.BundleValueError as exc:
             raise error.Abort(_('missing support for %s') % exc)
 
     if pullop.fetch:
         pullop.cgresult = bundle2.combinechangegroupresults(op)
 
     # processing phases change
     for namespace, value in op.records['listkeys']:
         if namespace == 'phases':
             _pullapplyphases(pullop, value)
 
     # processing bookmark update
     if bookmarksrequested:
         books = {}
         for record in op.records['bookmarks']:
             books[record['bookmark']] = record["node"]
         pullop.remotebookmarks = books
     else:
         for namespace, value in op.records['listkeys']:
             if namespace == 'bookmarks':
                 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
 
     # bookmark data was either already there or pulled in the bundle
     if pullop.remotebookmarks is not None:
         _pullbookmarks(pullop)
 
 def _pullbundle2extraprepare(pullop, kwargs):
     """hook function so that extensions can extend the getbundle call"""
 
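# A minimal sketch of an extension hooking the empty
# _pullbundle2extraprepare() above to add arguments to the getbundle call;
# the module layout and the 'x-mycap' argument are hypothetical:
from mercurial import exchange, extensions

def _extraprepare(orig, pullop, kwargs):
    kwargs['x-mycap'] = True    # hypothetical extra getbundle argument
    return orig(pullop, kwargs)

def extsetup(ui):
    extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
                            _extraprepare)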
 def _pullchangeset(pullop):
     """pull changesets from unbundle into the local repo"""
     # We delay the open of the transaction as late as possible so we
     # don't open a transaction for nothing, or you break future useful
     # rollback calls
     if 'changegroup' in pullop.stepsdone:
         return
     pullop.stepsdone.add('changegroup')
     if not pullop.fetch:
         pullop.repo.ui.status(_("no changes found\n"))
         pullop.cgresult = 0
         return
     tr = pullop.gettransaction()
     if pullop.heads is None and list(pullop.common) == [nullid]:
         pullop.repo.ui.status(_("requesting all changes\n"))
     elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
         # issue1320, avoid a race if remote changed after discovery
         pullop.heads = pullop.rheads
 
     if pullop.remote.capable('getbundle'):
         # TODO: get bundlecaps from remote
         cg = pullop.remote.getbundle('pull', common=pullop.common,
                                      heads=pullop.heads or pullop.rheads)
     elif pullop.heads is None:
         with pullop.remote.commandexecutor() as e:
             cg = e.callcommand('changegroup', {
                 'nodes': pullop.fetch,
                 'source': 'pull',
             }).result()
 
     elif not pullop.remote.capable('changegroupsubset'):
         raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
     else:
         with pullop.remote.commandexecutor() as e:
             cg = e.callcommand('changegroupsubset', {
                 'bases': pullop.fetch,
                 'heads': pullop.heads,
                 'source': 'pull',
             }).result()
 
     bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                    pullop.remote.url())
     pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
 
 def _pullphase(pullop):
     # Get remote phases data from remote
     if 'phases' in pullop.stepsdone:
         return
     remotephases = listkeys(pullop.remote, 'phases')
     _pullapplyphases(pullop, remotephases)
 
 def _pullapplyphases(pullop, remotephases):
     """apply phase movement from observed remote state"""
     if 'phases' in pullop.stepsdone:
         return
     pullop.stepsdone.add('phases')
     publishing = bool(remotephases.get('publishing', False))
     if remotephases and not publishing:
         # remote is new and non-publishing
         pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                  pullop.pulledsubset,
                                                  remotephases)
         dheads = pullop.pulledsubset
     else:
         # Remote is old or publishing; all common changesets
         # should be seen as public
         pheads = pullop.pulledsubset
         dheads = []
     unfi = pullop.repo.unfiltered()
     phase = unfi._phasecache.phase
     rev = unfi.changelog.nodemap.get
     public = phases.public
     draft = phases.draft
 
     # exclude changesets already public locally and update the others
     pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
     if pheads:
         tr = pullop.gettransaction()
         phases.advanceboundary(pullop.repo, tr, public, pheads)
 
     # exclude changesets already draft locally and update the others
     dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
     if dheads:
         tr = pullop.gettransaction()
         phases.advanceboundary(pullop.repo, tr, draft, dheads)
 
 def _pullbookmarks(pullop):
     """process the remote bookmark information to update the local one"""
     if 'bookmarks' in pullop.stepsdone:
         return
     pullop.stepsdone.add('bookmarks')
     repo = pullop.repo
     remotebookmarks = pullop.remotebookmarks
     bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                              pullop.remote.url(),
                              pullop.gettransaction,
                              explicit=pullop.explicitbookmarks)
 
 def _pullobsolete(pullop):
     """utility function to pull obsolete markers from a remote
 
     `gettransaction` is a function that returns the pull transaction, creating
     one if necessary. We return the transaction to inform the calling code that
     a new transaction has been created (when applicable).
 
     Exists mostly to allow overriding for experimentation purposes"""
     if 'obsmarkers' in pullop.stepsdone:
         return
     pullop.stepsdone.add('obsmarkers')
     tr = None
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
         pullop.repo.ui.debug('fetching remote obsolete markers\n')
         remoteobs = listkeys(pullop.remote, 'obsolete')
         if 'dump0' in remoteobs:
             tr = pullop.gettransaction()
             markers = []
             for key in sorted(remoteobs, reverse=True):
                 if key.startswith('dump'):
                     data = util.b85decode(remoteobs[key])
                     version, newmarks = obsolete._readmarkers(data)
                     markers += newmarks
             if markers:
                 pullop.repo.obsstore.add(tr, markers)
             pullop.repo.invalidatevolatilesets()
     return tr
 
 def applynarrowacl(repo, kwargs):
     """Apply narrow fetch access control.
 
     This massages the named arguments for getbundle wire protocol commands
     so requested data is filtered through access control rules.
     """
     ui = repo.ui
     # TODO this assumes existence of HTTP and is a layering violation.
     username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
     user_includes = ui.configlist(
         _NARROWACL_SECTION, username + '.includes',
         ui.configlist(_NARROWACL_SECTION, 'default.includes'))
     user_excludes = ui.configlist(
         _NARROWACL_SECTION, username + '.excludes',
         ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
     if not user_includes:
         raise error.Abort(_("{} configuration for user {} is empty")
                           .format(_NARROWACL_SECTION, username))
 
     user_includes = [
         'path:.' if p == '*' else 'path:' + p for p in user_includes]
     user_excludes = [
         'path:.' if p == '*' else 'path:' + p for p in user_excludes]
 
     req_includes = set(kwargs.get(r'includepats', []))
     req_excludes = set(kwargs.get(r'excludepats', []))
 
     req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
         req_includes, req_excludes, user_includes, user_excludes)
 
     if invalid_includes:
         raise error.Abort(
             _("The following includes are not accessible for {}: {}")
             .format(username, invalid_includes))
 
     new_args = {}
     new_args.update(kwargs)
     new_args[r'narrow'] = True
     new_args[r'includepats'] = req_includes
     if req_excludes:
         new_args[r'excludepats'] = req_excludes
 
     return new_args
 
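# A minimal configuration sketch for applynarrowacl() above; the section name
# comes from _NARROWACL_SECTION (defined earlier in this module), user names
# and paths are hypothetical, and '*' expands to 'path:.' as shown above:
#
#     [<value of _NARROWACL_SECTION>]
#     default.includes = *
#     alice.includes = src/ docs/
#     alice.excludes = src/secret/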
 def _computeellipsis(repo, common, heads, known, match, depth=None):
     """Compute the shape of a narrowed DAG.
 
     Args:
       repo: The repository we're transferring.
       common: The roots of the DAG range we're transferring.
               May be just [nullid], which means all ancestors of heads.
       heads: The heads of the DAG range we're transferring.
       known: A set of revs considered required in addition to heads.
       match: The narrowmatcher that allows us to identify relevant changes.
       depth: If not None, only consider nodes to be full nodes if they are at
              most depth changesets away from one of heads.
 
     Returns:
       A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
 
         visitnodes: The list of nodes (either full or ellipsis) which
                     need to be sent to the client.
         relevant_nodes: The set of changelog nodes which change a file inside
                  the narrowspec. The client needs these as non-ellipsis nodes.
         ellipsisroots: A dict of {rev: parents} that is used in
                        narrowchangegroup to produce ellipsis nodes with the
                        correct parents.
     """
     cl = repo.changelog
     mfl = repo.manifestlog
 
     clrev = cl.rev
 
     commonrevs = {clrev(n) for n in common} | {nullrev}
     headsrevs = {clrev(n) for n in heads}
 
     if depth:
         revdepth = {h: 0 for h in headsrevs}
 
     ellipsisheads = collections.defaultdict(set)
     ellipsisroots = collections.defaultdict(set)
 
     def addroot(head, curchange):
         """Add a root to an ellipsis head, splitting heads with 3 roots."""
         ellipsisroots[head].add(curchange)
         # Recursively split ellipsis heads with 3 roots by finding the
         # roots' youngest common descendant which is an elided merge commit.
         # That descendant takes 2 of the 3 roots as its own, and becomes a
         # root of the head.
         while len(ellipsisroots[head]) > 2:
             child, roots = splithead(head)
             splitroots(head, child, roots)
             head = child  # Recurse in case we just added a 3rd root
 
     def splitroots(head, child, roots):
         ellipsisroots[head].difference_update(roots)
         ellipsisroots[head].add(child)
         ellipsisroots[child].update(roots)
         ellipsisroots[child].discard(child)
 
     def splithead(head):
         r1, r2, r3 = sorted(ellipsisroots[head])
         for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
             mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                             nr1, head, nr2, head)
             for j in mid:
                 if j == nr2:
                     return nr2, (nr1, nr2)
                 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                     return j, (nr1, nr2)
         raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                             'roots: %d %d %d') % (head, r1, r2, r3))
 
     missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
     visit = reversed(missing)
     relevant_nodes = set()
     visitnodes = [cl.node(m) for m in missing]
     required = set(headsrevs) | known
     for rev in visit:
         clrev = cl.changelogrevision(rev)
         ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
         if depth is not None:
             curdepth = revdepth[rev]
             for p in ps:
                 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
         needed = False
         shallow_enough = depth is None or revdepth[rev] <= depth
         if shallow_enough:
             curmf = mfl[clrev.manifest].read()
             if ps:
                 # We choose to not trust the changed files list in
                 # changesets because it's not always correct. TODO: could
                 # we trust it for the non-merge case?
                 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                 needed = bool(curmf.diff(p1mf, match))
                 if not needed and len(ps) > 1:
                     # For merge changes, the list of changed files is not
                     # helpful, since we need to emit the merge if a file
                     # in the narrow spec has changed on either side of the
                     # merge. As a result, we do a manifest diff to check.
                     p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                     needed = bool(curmf.diff(p2mf, match))
             else:
                 # For a root node, we need to include the node if any
                 # files in the node match the narrowspec.
                 needed = any(curmf.walk(match))
 
         if needed:
             for head in ellipsisheads[rev]:
                 addroot(head, rev)
             for p in ps:
                 required.add(p)
             relevant_nodes.add(cl.node(rev))
         else:
             if not ps:
                 ps = [nullrev]
             if rev in required:
                 for head in ellipsisheads[rev]:
                     addroot(head, rev)
                 for p in ps:
                     ellipsisheads[p].add(rev)
             else:
                 for p in ps:
                     ellipsisheads[p] |= ellipsisheads[rev]
 
     # add common changesets as roots of their reachable ellipsis heads
     for c in commonrevs:
         for head in ellipsisheads[c]:
             addroot(head, c)
     return visitnodes, relevant_nodes, ellipsisroots
 
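# A minimal invocation sketch for _computeellipsis() above (`repo` and the
# narrowmatcher `match` are assumed to exist; the whole DAG is narrowed, with
# no depth limit):
visitnodes, relevant, ellipsisroots = _computeellipsis(
    repo, common=[nullid], heads=repo.heads(), known=set(), match=match)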
 def caps20to10(repo, role):
     """return a set with appropriate options to use bundle20 during getbundle"""
     caps = {'HG20'}
     capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
     caps.add('bundle2=' + urlreq.quote(capsblob))
     return caps
 
 # List of names of steps to perform for a bundle2 for getbundle, order matters.
 getbundle2partsorder = []
 
 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 getbundle2partsmapping = {}
 
 def getbundle2partsgenerator(stepname, idx=None):
     """decorator for function generating bundle2 part for getbundle
 
     The function is added to the step -> function mapping and appended to the
     list of steps.  Beware that decorated functions will be added in order
     (this may matter).
 
     You can only use this decorator for new steps; if you want to wrap a step
     from an extension, change the getbundle2partsmapping dictionary directly."""
     def dec(func):
         assert stepname not in getbundle2partsmapping
         getbundle2partsmapping[stepname] = func
         if idx is None:
             getbundle2partsorder.append(stepname)
         else:
             getbundle2partsorder.insert(idx, stepname)
         return func
     return dec
 
 def bundle2requested(bundlecaps):
     if bundlecaps is not None:
         return any(cap.startswith('HG2') for cap in bundlecaps)
     return False
 
 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                     **kwargs):
     """Return chunks constituting a bundle's raw data.
 
     Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
     passed.
 
     Returns a 2-tuple of a dict with metadata about the generated bundle
     and an iterator over raw chunks (of varying sizes).
     """
     kwargs = pycompat.byteskwargs(kwargs)
     info = {}
     usebundle2 = bundle2requested(bundlecaps)
     # bundle10 case
     if not usebundle2:
         if bundlecaps and not kwargs.get('cg', True):
             raise ValueError(_('request for bundle10 must include changegroup'))
 
         if kwargs:
             raise ValueError(_('unsupported getbundle arguments: %s')
                              % ', '.join(sorted(kwargs.keys())))
         outgoing = _computeoutgoing(repo, heads, common)
         info['bundleversion'] = 1
         return info, changegroup.makestream(repo, outgoing, '01', source,
                                             bundlecaps=bundlecaps)
 
     # bundle20 case
     info['bundleversion'] = 2
     b2caps = {}
     for bcaps in bundlecaps:
         if bcaps.startswith('bundle2='):
             blob = urlreq.unquote(bcaps[len('bundle2='):])
             b2caps.update(bundle2.decodecaps(blob))
     bundler = bundle2.bundle20(repo.ui, b2caps)
 
     kwargs['heads'] = heads
     kwargs['common'] = common
 
     for name in getbundle2partsorder:
         func = getbundle2partsmapping[name]
         func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
              **pycompat.strkwargs(kwargs))
 
     info['prefercompressed'] = bundler.prefercompressed
 
     return info, bundler.getchunks()
 
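# A minimal usage sketch for getbundlechunks() above (values are
# hypothetical; a real caller passes the bundlecaps it received from the
# client):
info, gen = getbundlechunks(repo, 'serve', heads=repo.heads(),
                            common=[nullid], bundlecaps={'HG20'})
raw = ''.join(gen)    # concatenated bundle2 payload; info['bundleversion'] == 2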
 @getbundle2partsgenerator('stream2')
 def _getbundlestream2(bundler, repo, *args, **kwargs):
     return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
 
 @getbundle2partsgenerator('changegroup')
 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                               b2caps=None, heads=None, common=None, **kwargs):
     """add a changegroup part to the requested bundle"""
     if not kwargs.get(r'cg', True):
         return
 
     version = '01'
     cgversions = b2caps.get('changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = max(cgversions)
 
     outgoing = _computeoutgoing(repo, heads, common)
     if not outgoing.missing:
         return
 
     if kwargs.get(r'narrow', False):
         include = sorted(filter(bool, kwargs.get(r'includepats', [])))
         exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
         filematcher = narrowspec.match(repo.root, include=include,
                                        exclude=exclude)
     else:
         filematcher = None
 
     cgstream = changegroup.makestream(repo, outgoing, version, source,
                                       bundlecaps=bundlecaps,
                                       filematcher=filematcher)
 
     part = bundler.newpart('changegroup', data=cgstream)
     if cgversions:
         part.addparam('version', version)
 
     part.addparam('nbchanges', '%d' % len(outgoing.missing),
                   mandatory=False)
 
     if 'treemanifest' in repo.requirements:
         part.addparam('treemanifest', '1')
 
     if kwargs.get(r'narrow', False) and (include or exclude):
         narrowspecpart = bundler.newpart('narrow:spec')
         if include:
             narrowspecpart.addparam(
                 'include', '\n'.join(include), mandatory=True)
         if exclude:
             narrowspecpart.addparam(
                 'exclude', '\n'.join(exclude), mandatory=True)
 
 @getbundle2partsgenerator('bookmarks')
 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                               b2caps=None, **kwargs):
     """add a bookmark part to the requested bundle"""
     if not kwargs.get(r'bookmarks', False):
         return
     if 'bookmarks' not in b2caps:
         raise ValueError(_('no common bookmarks exchange method'))
     books = bookmod.listbinbookmarks(repo)
     data = bookmod.binaryencode(books)
     if data:
         bundler.newpart('bookmarks', data=data)
 
 @getbundle2partsgenerator('listkeys')
 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                             b2caps=None, **kwargs):
     """add parts containing listkeys namespaces to the requested bundle"""
     listkeys = kwargs.get(r'listkeys', ())
     for namespace in listkeys:
         part = bundler.newpart('listkeys')
         part.addparam('namespace', namespace)
         keys = repo.listkeys(namespace).items()
         part.data = pushkey.encodekeys(keys)
 
 @getbundle2partsgenerator('obsmarkers')
 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, **kwargs):
     """add an obsolescence markers part to the requested bundle"""
     if kwargs.get(r'obsmarkers', False):
         if heads is None:
             heads = repo.heads()
         subset = [c.node() for c in repo.set('::%ln', heads)]
         markers = repo.obsstore.relevantmarkers(subset)
         markers = sorted(markers)
         bundle2.buildobsmarkerspart(bundler, markers)
 
 @getbundle2partsgenerator('phases')
 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, **kwargs):
\\\"\\\"\\\"add phase heads part to the requested bundle\\\"\\\"\\\"\\n     if kwargs.get(r'phases', False):\\n         if not 'heads' in b2caps.get('phases'):\\n             raise ValueError(_('no common phases exchange method'))\\n         if heads is None:\\n             heads = repo.heads()\\n \\n         headsbyphase = collections.defaultdict(set)\\n         if repo.publishing():\\n             headsbyphase[phases.public] = heads\\n         else:\\n             # find the appropriate heads to move\\n \\n             phase = repo._phasecache.phase\\n             node = repo.changelog.node\\n             rev = repo.changelog.rev\\n             for h in heads:\\n                 headsbyphase[phase(repo, rev(h))].add(h)\\n             seenphases = list(headsbyphase.keys())\\n \\n             # We do not handle anything but public and draft phase for now)\\n             if seenphases:\\n                 assert max(seenphases) \\u003c= phases.draft\\n \\n             # if client is pulling non-public changesets, we need to find\\n             # intermediate public heads.\\n             draftheads = headsbyphase.get(phases.draft, set())\\n             if draftheads:\\n                 publicheads = headsbyphase.get(phases.public, set())\\n \\n                 revset = 'heads(only(%ln, %ln) and public())'\\n                 extraheads = repo.revs(revset, draftheads, publicheads)\\n                 for r in extraheads:\\n                     headsbyphase[phases.public].add(node(r))\\n \\n         # transform data in a format used by the encoding function\\n         phasemapping = []\\n         for phase in phases.allphases:\\n             phasemapping.append(sorted(headsbyphase[phase]))\\n \\n         # generate the actual part\\n         phasedata = phases.binaryencode(phasemapping)\\n         bundler.newpart('phase-heads', data=phasedata)\\n \\n @getbundle2partsgenerator('hgtagsfnodes')\\n def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,\\n                          b2caps=None, heads=None, common=None,\\n                          **kwargs):\\n     \\\"\\\"\\\"Transfer the .hgtags filenodes mapping.\\n \\n     Only values for heads in this bundle will be transferred.\\n \\n     The part data consists of pairs of 20 byte changeset node and .hgtags\\n     filenodes raw values.\\n     \\\"\\\"\\\"\\n     # Don't send unless:\\n     # - changeset are being exchanged,\\n     # - the client supports it.\\n     if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):\\n         return\\n \\n     outgoing = _computeoutgoing(repo, heads, common)\\n     bundle2.addparttagsfnodescache(repo, bundler, outgoing)\\n \\n @getbundle2partsgenerator('cache:rev-branch-cache')\\n def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,\\n                              b2caps=None, heads=None, common=None,\\n                              **kwargs):\\n     \\\"\\\"\\\"Transfer the rev-branch-cache mapping\\n \\n     The payload is a series of data related to each branch\\n \\n     1) branch name length\\n     2) number of open heads\\n     3) number of closed heads\\n     4) open heads nodes\\n     5) closed heads nodes\\n     \\\"\\\"\\\"\\n     # Don't send unless:\\n     # - changeset are being exchanged,\\n     # - the client supports it.\\n     # - narrow bundle isn't in play (not currently compatible).\\n     if (not kwargs.get(r'cg', True)\\n         or 'rev-branch-cache' not in b2caps\\n         or kwargs.get(r'narrow', False)\\n         or 
repo.ui.has_section(_NARROWACL_SECTION)):\\n         return\\n \\n     outgoing = _computeoutgoing(repo, heads, common)\\n     bundle2.addpartrevbranchcache(repo, bundler, outgoing)\\n \\n def check_heads(repo, their_heads, context):\\n     \\\"\\\"\\\"check if the heads of a repo have been modified\\n \\n     Used by peer for unbundling.\\n     \\\"\\\"\\\"\\n     heads = repo.heads()\\n     heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()\\n     if not (their_heads == ['force'] or their_heads == heads or\\n             their_heads == ['hashed', heads_hash]):\\n         # someone else committed\\/pushed\\/unbundled while we\\n         # were transferring data\\n         raise error.PushRaced('repository changed while %s - '\\n                               'please try again' % context)\\n \\n def unbundle(repo, cg, heads, source, url):\\n     \\\"\\\"\\\"Apply a bundle to a repo.\\n \\n     this function makes sure the repo is locked during the application and have\\n     mechanism to check that no push race occurred between the creation of the\\n     bundle and its application.\\n \\n     If the push was raced as PushRaced exception is raised.\\\"\\\"\\\"\\n     r = 0\\n     # need a transaction when processing a bundle2 stream\\n     # [wlock, lock, tr] - needs to be an array so nested functions can modify it\\n     lockandtr = [None, None, None]\\n     recordout = None\\n     # quick fix for output mismatch with bundle2 in 3.4\\n     captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')\\n     if url.startswith('remote:http:') or url.startswith('remote:https:'):\\n         captureoutput = True\\n     try:\\n         # note: outside bundle1, 'heads' is expected to be empty and this\\n         # 'check_heads' call wil be a no-op\\n         check_heads(repo, heads, 'uploading changes')\\n         # push can proceed\\n         if not isinstance(cg, bundle2.unbundle20):\\n             # legacy case: bundle1 (changegroup 01)\\n             txnname = \\\"\\\\n\\\".join([source, util.hidepassword(url)])\\n             with repo.lock(), repo.transaction(txnname) as tr:\\n                 op = bundle2.applybundle(repo, cg, tr, source, url)\\n                 r = bundle2.combinechangegroupresults(op)\\n         else:\\n             r = None\\n             try:\\n                 def gettransaction():\\n                     if not lockandtr[2]:\\n                         lockandtr[0] = repo.wlock()\\n                         lockandtr[1] = repo.lock()\\n                         lockandtr[2] = repo.transaction(source)\\n                         lockandtr[2].hookargs['source'] = source\\n                         lockandtr[2].hookargs['url'] = url\\n                         lockandtr[2].hookargs['bundle2'] = '1'\\n                     return lockandtr[2]\\n \\n                 # Do greedy locking by default until we're satisfied with lazy\\n                 # locking.\\n                 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):\\n                     gettransaction()\\n \\n                 op = bundle2.bundleoperation(repo, gettransaction,\\n                                              captureoutput=captureoutput,\\n                                              source='push')\\n                 try:\\n                     op = bundle2.processbundle(repo, cg, op=op)\\n                 finally:\\n                     r = op.reply\\n                     if captureoutput and r is not None:\\n                         repo.ui.pushbuffer(error=True, 
subproc=True)\\n                         def recordout(output):\\n                             r.newpart('output', data=output, mandatory=False)\\n                 if lockandtr[2] is not None:\\n                     lockandtr[2].close()\\n             except BaseException as exc:\\n                 exc.duringunbundle2 = True\\n                 if captureoutput and r is not None:\\n                     parts = exc._bundle2salvagedoutput = r.salvageoutput()\\n                     def recordout(output):\\n                         part = bundle2.bundlepart('output', data=output,\\n                                                   mandatory=False)\\n                         parts.append(part)\\n                 raise\\n     finally:\\n         lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])\\n         if recordout is not None:\\n             recordout(repo.ui.popbuffer())\\n     return r\\n \\n def _maybeapplyclonebundle(pullop):\\n     \\\"\\\"\\\"Apply a clone bundle from a remote, if possible.\\\"\\\"\\\"\\n \\n     repo = pullop.repo\\n     remote = pullop.remote\\n \\n     if not repo.ui.configbool('ui', 'clonebundles'):\\n         return\\n \\n     # Only run if local repo is empty.\\n     if len(repo):\\n         return\\n \\n     if pullop.heads:\\n         return\\n \\n     if not remote.capable('clonebundles'):\\n         return\\n \\n     with remote.commandexecutor() as e:\\n         res = e.callcommand('clonebundles', {}).result()\\n \\n     # If we call the wire protocol command, that's good enough to record the\\n     # attempt.\\n     pullop.clonebundleattempted = True\\n \\n     entries = parseclonebundlesmanifest(repo, res)\\n     if not entries:\\n         repo.ui.note(_('no clone bundles available on remote; '\\n                        'falling back to regular clone\\\\n'))\\n         return\\n \\n     entries = filterclonebundleentries(\\n         repo, entries, streamclonerequested=pullop.streamclonerequested)\\n \\n     if not entries:\\n         # There is a thundering herd concern here. However, if a server\\n         # operator doesn't advertise bundles appropriate for its clients,\\n         # they deserve what's coming. 
Furthermore, from a client's\\n         # perspective, no automatic fallback would mean not being able to\\n         # clone!\\n         repo.ui.warn(_('no compatible clone bundles available on server; '\\n                        'falling back to regular clone\\\\n'))\\n         repo.ui.warn(_('(you may want to report this to the server '\\n                        'operator)\\\\n'))\\n         return\\n \\n     entries = sortclonebundleentries(repo.ui, entries)\\n \\n     url = entries[0]['URL']\\n     repo.ui.status(_('applying clone bundle from %s\\\\n') % url)\\n     if trypullbundlefromurl(repo.ui, repo, url):\\n         repo.ui.status(_('finished applying clone bundle\\\\n'))\\n     # Bundle failed.\\n     #\\n     # We abort by default to avoid the thundering herd of\\n     # clients flooding a server that was expecting expensive\\n     # clone load to be offloaded.\\n     elif repo.ui.configbool('ui', 'clonebundlefallback'):\\n         repo.ui.warn(_('falling back to normal clone\\\\n'))\\n     else:\\n         raise error.Abort(_('error applying bundle'),\\n                           hint=_('if this error persists, consider contacting '\\n                                  'the server operator or disable clone '\\n                                  'bundles via '\\n                                  '\\\"--config ui.clonebundles=false\\\"'))\\n \\n def parseclonebundlesmanifest(repo, s):\\n     \\\"\\\"\\\"Parses the raw text of a clone bundles manifest.\\n \\n     Returns a list of dicts. The dicts have a ``URL`` key corresponding\\n     to the URL and other keys are the attributes for the entry.\\n     \\\"\\\"\\\"\\n     m = []\\n     for line in s.splitlines():\\n         fields = line.split()\\n         if not fields:\\n             continue\\n         attrs = {'URL': fields[0]}\\n         for rawattr in fields[1:]:\\n             key, value = rawattr.split('=', 1)\\n             key = urlreq.unquote(key)\\n             value = urlreq.unquote(value)\\n             attrs[key] = value\\n \\n             # Parse BUNDLESPEC into components. 
This makes client-side\\n             # preferences easier to specify since you can prefer a single\\n             # component of the BUNDLESPEC.\\n             if key == 'BUNDLESPEC':\\n                 try:\\n                     bundlespec = parsebundlespec(repo, value)\\n                     attrs['COMPRESSION'] = bundlespec.compression\\n                     attrs['VERSION'] = bundlespec.version\\n                 except error.InvalidBundleSpecification:\\n                     pass\\n                 except error.UnsupportedBundleSpecification:\\n                     pass\\n \\n         m.append(attrs)\\n \\n     return m\\n \\n def isstreamclonespec(bundlespec):\\n     # Stream clone v1\\n     if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):\\n         return True\\n \\n     # Stream clone v2\\n     if (bundlespec.wirecompression == 'UN' and \\\\\\n         bundlespec.wireversion == '02' and \\\\\\n         bundlespec.contentopts.get('streamv2')):\\n         return True\\n \\n     return False\\n \\n def filterclonebundleentries(repo, entries, streamclonerequested=False):\\n     \\\"\\\"\\\"Remove incompatible clone bundle manifest entries.\\n \\n     Accepts a list of entries parsed with ``parseclonebundlesmanifest``\\n     and returns a new list consisting of only the entries that this client\\n     should be able to apply.\\n \\n     There is no guarantee we'll be able to apply all returned entries because\\n     the metadata we use to filter on may be missing or wrong.\\n     \\\"\\\"\\\"\\n     newentries = []\\n     for entry in entries:\\n         spec = entry.get('BUNDLESPEC')\\n         if spec:\\n             try:\\n                 bundlespec = parsebundlespec(repo, spec, strict=True)\\n \\n                 # If a stream clone was requested, filter out non-streamclone\\n                 # entries.\\n                 if streamclonerequested and not isstreamclonespec(bundlespec):\\n                     repo.ui.debug('filtering %s because not a stream clone\\\\n' %\\n                                   entry['URL'])\\n                     continue\\n \\n             except error.InvalidBundleSpecification as e:\\n                 repo.ui.debug(stringutil.forcebytestr(e) + '\\\\n')\\n                 continue\\n             except error.UnsupportedBundleSpecification as e:\\n                 repo.ui.debug('filtering %s because unsupported bundle '\\n                               'spec: %s\\\\n' % (\\n                                   entry['URL'], stringutil.forcebytestr(e)))\\n                 continue\\n         # If we don't have a spec and requested a stream clone, we don't know\\n         # what the entry is so don't attempt to apply it.\\n         elif streamclonerequested:\\n             repo.ui.debug('filtering %s because cannot determine if a stream '\\n                           'clone bundle\\\\n' % entry['URL'])\\n             continue\\n \\n         if 'REQUIRESNI' in entry and not sslutil.hassni:\\n             repo.ui.debug('filtering %s because SNI not supported\\\\n' %\\n                           entry['URL'])\\n             continue\\n \\n         newentries.append(entry)\\n \\n     return newentries\\n \\n class clonebundleentry(object):\\n     \\\"\\\"\\\"Represents an item in a clone bundles manifest.\\n \\n     This rich class is needed to support sorting since sorted() in Python 3\\n     doesn't support ``cmp`` and our comparison is complex enough that ``key=``\\n     won't work.\\n     \\\"\\\"\\\"\\n \\n     def 
__init__(self, value, prefers):\\n         self.value = value\\n         self.prefers = prefers\\n \\n     def _cmp(self, other):\\n         for prefkey, prefvalue in self.prefers:\\n             avalue = self.value.get(prefkey)\\n             bvalue = other.value.get(prefkey)\\n \\n             # Special case for b missing attribute and a matches exactly.\\n             if avalue is not None and bvalue is None and avalue == prefvalue:\\n                 return -1\\n \\n             # Special case for a missing attribute and b matches exactly.\\n             if bvalue is not None and avalue is None and bvalue == prefvalue:\\n                 return 1\\n \\n             # We can't compare unless attribute present on both.\\n             if avalue is None or bvalue is None:\\n                 continue\\n \\n             # Same values should fall back to next attribute.\\n             if avalue == bvalue:\\n                 continue\\n \\n             # Exact matches come first.\\n             if avalue == prefvalue:\\n                 return -1\\n             if bvalue == prefvalue:\\n                 return 1\\n \\n             # Fall back to next attribute.\\n             continue\\n \\n         # If we got here we couldn't sort by attributes and prefers. Fall\\n         # back to index order.\\n         return 0\\n \\n     def __lt__(self, other):\\n         return self._cmp(other) \\u003c 0\\n \\n     def __gt__(self, other):\\n         return self._cmp(other) \\u003e 0\\n \\n     def __eq__(self, other):\\n         return self._cmp(other) == 0\\n \\n     def __le__(self, other):\\n         return self._cmp(other) \\u003c= 0\\n \\n     def __ge__(self, other):\\n         return self._cmp(other) \\u003e= 0\\n \\n     def __ne__(self, other):\\n         return self._cmp(other) != 0\\n \\n def sortclonebundleentries(ui, entries):\\n     prefers = ui.configlist('ui', 'clonebundleprefers')\\n     if not prefers:\\n         return list(entries)\\n \\n     prefers = [p.split('=', 1) for p in prefers]\\n \\n     items = sorted(clonebundleentry(v, prefers) for v in entries)\\n     return [i.value for i in items]\\n \\n def trypullbundlefromurl(ui, repo, url):\\n     \\\"\\\"\\\"Attempt to apply a bundle from a URL.\\\"\\\"\\\"\\n     with repo.lock(), repo.transaction('bundleurl') as tr:\\n         try:\\n             fh = urlmod.open(ui, url)\\n             cg = readbundle(ui, fh, 'stream')\\n \\n             if isinstance(cg, streamclone.streamcloneapplier):\\n                 cg.apply(repo)\\n             else:\\n                 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)\\n             return True\\n         except urlerr.httperror as e:\\n             ui.warn(_('HTTP error fetching bundle: %s\\\\n') %\\n                     stringutil.forcebytestr(e))\\n         except urlerr.urlerror as e:\\n             ui.warn(_('error fetching bundle: %s\\\\n') %\\n                     stringutil.forcebytestr(e.reason))\\n \\n         return False\\n\"}]}],\"properties\":[]}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "147"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B11058%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:31:55 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":\"diff --git a\\/tests\\/wireprotohelpers.sh b\\/tests\\/wireprotohelpers.sh\\n--- a\\/tests\\/wireprotohelpers.sh\\n+++ b\\/tests\\/wireprotohelpers.sh\\n@@ -56,3 +56,10 @@\\n web.api.http-v2 = true\\n EOF\\n }\\n+\\n+enablehttpv2client() {\\n+  cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n+[experimental]\\n+httppeer.advertise-v2 = true\\n+EOF\\n+}\\ndiff --git a\\/tests\\/test-wireproto-exchangev2.t b\\/tests\\/test-wireproto-exchangev2.t\\nnew file mode 100644\\n--- \\/dev\\/null\\n+++ b\\/tests\\/test-wireproto-exchangev2.t\\n@@ -0,0 +1,53 @@\\n+Tests for wire protocol version 2 exchange.\\n+Tests in this file should be folded into existing tests once protocol\\n+v2 has enough features that it can be enabled via #testcase in existing\\n+tests.\\n+\\n+  $ . $TESTDIR\\/wireprotohelpers.sh\\n+  $ enablehttpv2client\\n+\\n+  $ hg init server-simple\\n+  $ enablehttpv2 server-simple\\n+  $ cd server-simple\\n+  $ cat \\u003e\\u003e .hg\\/hgrc \\u003c\\u003c EOF\\n+  \\u003e [phases]\\n+  \\u003e publish = false\\n+  \\u003e EOF\\n+  $ echo a0 \\u003e a\\n+  $ echo b0 \\u003e b\\n+  $ hg -q commit -A -m 'commit 0'\\n+\\n+  $ echo a1 \\u003e a\\n+  $ hg commit -m 'commit 1'\\n+  $ hg phase --public -r .\\n+  $ echo a2 \\u003e a\\n+  $ hg commit -m 'commit 2'\\n+\\n+  $ hg -q up -r 0\\n+  $ echo b1 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 1'\\n+  $ echo b2 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 2'\\n+\\n+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log\\n+  $ cat hg.pid \\u003e $DAEMON_PIDS\\n+\\n+  $ cd ..\\n+\\n+Test basic clone\\n+\\n+  $ hg --debug clone -U http:\\/\\/localhost:$HGPORT client-simple\\n+  using http:\\/\\/localhost:$HGPORT\\/\\n+  sending capabilities command\\n+  query 1; heads\\n+  sending 2 commands\\n+  sending command heads: {}\\n+  sending command known: {\\n+    'nodes': []\\n+  }\\n+  received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)\\n+  received frame(size=43; request=1; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)\\n+  received frame(size=11; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=1; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)\\ndiff --git a\\/mercurial\\/httppeer.py b\\/mercurial\\/httppeer.py\\n--- a\\/mercurial\\/httppeer.py\\n+++ b\\/mercurial\\/httppeer.py\\n@@ -802,7 +802,8 @@\\n             return True\\n \\n         # Other concepts.\\n-        if name in ('bundle2',):\\n+        # TODO remove exchangev2 once we have a command implemented.\\n+        if name in ('bundle2', 'exchangev2'):\\n             return True\\n \\n         # Alias command-* to presence of command of that name.\\ndiff --git a\\/mercurial\\/exchangev2.py b\\/mercurial\\/exchangev2.py\\nnew file mode 100644\\n--- \\/dev\\/null\\n+++ b\\/mercurial\\/exchangev2.py\\n@@ -0,0 +1,55 @@\\n+# exchangev2.py - repository exchange for wire protocol version 2\\n+#\\n+# Copyright 2018 Gregory Szorc \\u003cgregory.szorc@gmail.com\\u003e\\n+#\\n+# This software may be used and distributed according to the terms of the\\n+# GNU General Public License version 2 or any later version.\\n+\\n+from __future__ import absolute_import\\n+\\n+from .node import (\\n+ 
   nullid,\\n+)\\n+from . import (\\n+    setdiscovery,\\n+)\\n+\\n+def pull(pullop):\\n+    \\\"\\\"\\\"Pull using wire protocol version 2.\\\"\\\"\\\"\\n+    repo = pullop.repo\\n+    remote = pullop.remote\\n+\\n+    # Figure out what needs to be fetched.\\n+    common, fetch, remoteheads = _pullchangesetdiscovery(\\n+        repo, remote, pullop.heads, abortwhenunrelated=pullop.force)\\n+\\n+def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):\\n+    \\\"\\\"\\\"Determine which changesets need to be pulled.\\\"\\\"\\\"\\n+\\n+    if heads:\\n+        knownnode = repo.changelog.hasnode\\n+        if all(knownnode(head) for head in heads):\\n+            return heads, False, heads\\n+\\n+    # TODO wire protocol version 2 is capable of more efficient discovery\\n+    # than setdiscovery. Consider implementing something better.\\n+    common, fetch, remoteheads = setdiscovery.findcommonheads(\\n+        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated)\\n+\\n+    common = set(common)\\n+    remoteheads = set(remoteheads)\\n+\\n+    # If a remote head is filtered locally, put it back in the common set.\\n+    # See the comment in exchange._pulldiscoverychangegroup() for more.\\n+\\n+    if fetch and remoteheads:\\n+        nodemap = repo.unfiltered().changelog.nodemap\\n+\\n+        common |= {head for head in remoteheads if head in nodemap}\\n+\\n+        if set(remoteheads).issubset(common):\\n+            fetch = []\\n+\\n+    common.discard(nullid)\\n+\\n+    return common, fetch, remoteheads\\ndiff --git a\\/mercurial\\/exchange.py b\\/mercurial\\/exchange.py\\n--- a\\/mercurial\\/exchange.py\\n+++ b\\/mercurial\\/exchange.py\\n@@ -26,6 +26,7 @@\\n     changegroup,\\n     discovery,\\n     error,\\n+    exchangev2,\\n     lock as lockmod,\\n     logexchange,\\n     narrowspec,\\n@@ -1506,17 +1507,21 @@\\n \\n     pullop.trmanager = transactionmanager(repo, 'pull', remote.url())\\n     with repo.wlock(), repo.lock(), pullop.trmanager:\\n-        # This should ideally be in _pullbundle2(). However, it needs to run\\n-        # before discovery to avoid extra work.\\n-        _maybeapplyclonebundle(pullop)\\n-        streamclone.maybeperformlegacystreamclone(pullop)\\n-        _pulldiscovery(pullop)\\n-        if pullop.canusebundle2:\\n-            _fullpullbundle2(repo, pullop)\\n-        _pullchangeset(pullop)\\n-        _pullphase(pullop)\\n-        _pullbookmarks(pullop)\\n-        _pullobsolete(pullop)\\n+        # Use the modern wire protocol, if available.\\n+        if remote.capable('exchangev2'):\\n+            exchangev2.pull(pullop)\\n+        else:\\n+            # This should ideally be in _pullbundle2(). However, it needs to run\\n+            # before discovery to avoid extra work.\\n+            _maybeapplyclonebundle(pullop)\\n+            streamclone.maybeperformlegacystreamclone(pullop)\\n+            _pulldiscovery(pullop)\\n+            if pullop.canusebundle2:\\n+                _fullpullbundle2(repo, pullop)\\n+            _pullchangeset(pullop)\\n+            _pullphase(pullop)\\n+            _pullbookmarks(pullop)\\n+            _pullobsolete(pullop)\\n \\n     # storing remotenames\\n     if repo.ui.configbool('experimental', 'remotenames'):\\n\\n\",\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.getrawdiff", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "144"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22diffID%22%3A+11058%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
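
The fixture ending above, like the others added in this change, is a recorded-HTTP cassette: a top-level object carrying a "version" key and an "interactions" list, where each entry pairs one Conduit POST request with the response the live server returned, the response body kept as a single JSON-encoded string. As a minimal sketch (not part of the change itself; the path below names one of the fixtures added later in this patch), such a cassette can be inspected with nothing beyond the standard json module:

    import json

    # Illustrative path: one of the cassettes added by this change.
    cassette_path = 'tests/phabricator/phabread-conduit-error.json'

    with open(cassette_path) as fh:
        cassette = json.load(fh)

    print('cassette version:', cassette['version'])
    for interaction in cassette['interactions']:
        req = interaction['request']
        res = interaction['response']
        # Each interaction records one round trip to the Conduit API.
        print(req['method'], req['uri'], '->', res['status']['code'])

Because the response payload is stored as an opaque string rather than parsed JSON, a replaying test can hand the bytes back to the client untouched, exactly as the server originally produced them.
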
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabread-conduit-error.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,73 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:31:53 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":\"ERR-INVALID-AUTH\",\"error_info\":\"API token \\\"cli-notavalidtoken\\\" has the wrong length. API tokens should be 32 characters long.\"}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "132"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B4480%5D%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
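
The "body" fields of the recorded requests all follow the same Conduit convention: a JSON object carrying the auth token under "__conduit__" plus the call's parameters is serialized, then URL-encoded as form data alongside "output=json" and "__conduit__=1". A minimal sketch of that encoding, using the scrubbed placeholder token the fixtures store (the reconstruction below matches the recorded body of the differential.query call just above byte for byte):

    import json
    import urllib.parse

    params = {
        '__conduit__': {'token': 'cli-hahayouwish'},
        'ids': [4480],
    }
    body = urllib.parse.urlencode({
        'params': json.dumps(params),
        'output': 'json',
        '__conduit__': 1,
    })
    # Produces: params=%7B%22__conduit__%22%3A+...%7D&output=json&__conduit__=1
    print(body)

Note that the recorded request carries the placeholder token while the response in this error fixture complains about "cli-notavalidtoken": presumably the recording sanitizer rewrites tokens in the outgoing request body before the cassette is saved, whereas the server's response text is stored verbatim and still reflects the invalid token that was actually sent during recording.
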
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabread-str-time.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,209 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:19 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"1285\",\"phid\":\"PHID-DREV-j3rjfvsetkqtvhk6dxye\",\"title\":\"repoview: add a new attribute _visibilityexceptions and related API\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D1285\",\"dateCreated\":\"1509645891\",\"dateModified\":\"1515574368\",\"authorPHID\":\"PHID-USER-34jnztnonbr4lhwuybwl\",\"status\":\"4\",\"statusName\":\"Abandoned\",\"properties\":[],\"branch\":null,\"summary\":\"Currently we don't have a defined way in core to make some hidden revisions\\nvisible in filtered repo. Extensions to achieve the purpose of unhiding some\\nhidden commits, wrap repoview.pinnedrevs() function.\\n\\nTo make the above task simple and have well defined API, this patch adds a new\\nattribute '_visibilityexceptions' to repoview class which will contains\\nthe hidden revs which should be exception.\\nThis will allow to set different exceptions for different repoview objects\\nbacked by the same unfiltered repo.\\n\\nThis patch also adds API to add revs to the attribute set and get them.\\n\\nThanks to Jun for suggesting the use of repoview class instead of localrepo.\",\"testPlan\":\"\",\"lineCount\":\"19\",\"activeDiffPHID\":\"PHID-DIFF-mpgcio4dbd3k3ab2ha6x\",\"diffs\":[\"4092\",\"4051\",\"3956\",\"3784\",\"3201\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\",\"PHID-USER-bdvomxbzdb42dlftorg4\":\"PHID-USER-bdvomxbzdb42dlftorg4\",\"PHID-USER-qwhdxkyioew7vwvxqc2g\":\"PHID-USER-qwhdxkyioew7vwvxqc2g\"},\"ccs\":[\"PHID-USER-qwhdxkyioew7vwvxqc2g\",\"PHID-USER-f6tllotq6q2rtpi47wjq\",\"PHID-USER-bdvomxbzdb42dlftorg4\",\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "146"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B1285%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:20 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"4092\":{\"id\":\"4092\",\"revisionID\":\"1285\",\"dateCreated\":\"1512396989\",\"dateModified\":\"1512396993\",\"sourceControlBaseRevision\":null,\"sourceControlPath\":null,\"sourceControlSystem\":null,\"branch\":null,\"bookmark\":null,\"creationMethod\":\"web\",\"description\":null,\"unitStatus\":\"4\",\"lintStatus\":\"4\",\"changes\":[{\"id\":\"9311\",\"metadata\":{\"line:first\":188},\"oldPath\":\"mercurial\\/repoview.py\",\"currentPath\":\"mercurial\\/repoview.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"11\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"242\",\"newLength\":\"253\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" # repoview.py - Filtered view of a localrepo object\\n #\\n # Copyright 2012 Pierre-Yves David \\u003cpierre-yves.david@ens-lyon.org\\u003e\\n #                Logilab SA        \\u003ccontact@logilab.fr\\u003e\\n #\\n # This software may be used and distributed according to the terms of the\\n # GNU General Public License version 2 or any later version.\\n \\n from __future__ import absolute_import\\n \\n import copy\\n \\n from .node import nullrev\\n from . import (\\n     obsolete,\\n     phases,\\n     tags as tagsmod,\\n )\\n \\n def hideablerevs(repo):\\n     \\\"\\\"\\\"Revision candidates to be hidden\\n \\n     This is a standalone function to allow extensions to wrap it.\\n \\n     Because we use the set of immutable changesets as a fallback subset in\\n     branchmap (see mercurial.branchmap.subsettable), you cannot set \\\"public\\\"\\n     changesets as \\\"hideable\\\". Doing so would break multiple code assertions and\\n     lead to crashes.\\\"\\\"\\\"\\n     return obsolete.getrevs(repo, 'obsolete')\\n \\n def pinnedrevs(repo):\\n     \\\"\\\"\\\"revisions blocking hidden changesets from being filtered\\n     \\\"\\\"\\\"\\n \\n     cl = repo.changelog\\n     pinned = set()\\n     pinned.update([par.rev() for par in repo[None].parents()])\\n     pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()])\\n \\n     tags = {}\\n     tagsmod.readlocaltags(repo.ui, repo, tags, {})\\n     if tags:\\n         rev, nodemap = cl.rev, cl.nodemap\\n         pinned.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)\\n     return pinned\\n \\n \\n def _revealancestors(pfunc, hidden, revs):\\n     \\\"\\\"\\\"reveals contiguous chains of hidden ancestors of 'revs' by removing them\\n     from 'hidden'\\n \\n     - pfunc(r): a funtion returning parent of 'r',\\n     - hidden: the (preliminary) hidden revisions, to be updated\\n     - revs: iterable of revnum,\\n \\n     (Ancestors are revealed exclusively, i.e. 
the elements in 'revs' are\\n     *not* revealed)\\n     \\\"\\\"\\\"\\n     stack = list(revs)\\n     while stack:\\n         for p in pfunc(stack.pop()):\\n             if p != nullrev and p in hidden:\\n                 hidden.remove(p)\\n                 stack.append(p)\\n \\n def computehidden(repo):\\n     \\\"\\\"\\\"compute the set of hidden revision to filter\\n \\n     During most operation hidden should be filtered.\\\"\\\"\\\"\\n     assert not repo.changelog.filteredrevs\\n \\n     hidden = hideablerevs(repo)\\n     if hidden:\\n         hidden = set(hidden - pinnedrevs(repo))\\n         pfunc = repo.changelog.parentrevs\\n         mutablephases = (phases.draft, phases.secret)\\n         mutable = repo._phasecache.getrevset(repo, mutablephases)\\n \\n         visible = mutable - hidden\\n         _revealancestors(pfunc, hidden, visible)\\n     return frozenset(hidden)\\n \\n def computeunserved(repo):\\n     \\\"\\\"\\\"compute the set of revision that should be filtered when used a server\\n \\n     Secret and hidden changeset should not pretend to be here.\\\"\\\"\\\"\\n     assert not repo.changelog.filteredrevs\\n     # fast path in simple case to avoid impact of non optimised code\\n     hiddens = filterrevs(repo, 'visible')\\n     if phases.hassecret(repo):\\n         cl = repo.changelog\\n         secret = phases.secret\\n         getphase = repo._phasecache.phase\\n         first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])\\n         revs = cl.revs(start=first)\\n         secrets = set(r for r in revs if getphase(repo, r) \\u003e= secret)\\n         return frozenset(hiddens | secrets)\\n     else:\\n         return hiddens\\n \\n def computemutable(repo):\\n     assert not repo.changelog.filteredrevs\\n     # fast check to avoid revset call on huge repo\\n     if any(repo._phasecache.phaseroots[1:]):\\n         getphase = repo._phasecache.phase\\n         maymutable = filterrevs(repo, 'base')\\n         return frozenset(r for r in maymutable if getphase(repo, r))\\n     return frozenset()\\n \\n def computeimpactable(repo):\\n     \\\"\\\"\\\"Everything impactable by mutable revision\\n \\n     The immutable filter still have some chance to get invalidated. 
This will\\n     happen when:\\n \\n     - you garbage collect hidden changeset,\\n     - public phase is moved backward,\\n     - something is changed in the filtering (this could be fixed)\\n \\n     This filter out any mutable changeset and any public changeset that may be\\n     impacted by something happening to a mutable revision.\\n \\n     This is achieved by filtered everything with a revision number egal or\\n     higher than the first mutable changeset is filtered.\\\"\\\"\\\"\\n     assert not repo.changelog.filteredrevs\\n     cl = repo.changelog\\n     firstmutable = len(cl)\\n     for roots in repo._phasecache.phaseroots[1:]:\\n         if roots:\\n             firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))\\n     # protect from nullrev root\\n     firstmutable = max(0, firstmutable)\\n     return frozenset(xrange(firstmutable, len(cl)))\\n \\n # function to compute filtered set\\n #\\n # When adding a new filter you MUST update the table at:\\n #     mercurial.branchmap.subsettable\\n # Otherwise your filter will have to recompute all its branches cache\\n # from scratch (very slow).\\n filtertable = {'visible': computehidden,\\n                'served': computeunserved,\\n                'immutable':  computemutable,\\n                'base':  computeimpactable}\\n \\n def filterrevs(repo, filtername):\\n     \\\"\\\"\\\"returns set of filtered revision for this filter name\\\"\\\"\\\"\\n     if filtername not in repo.filteredrevcache:\\n         func = filtertable[filtername]\\n         repo.filteredrevcache[filtername] = func(repo.unfiltered())\\n     return repo.filteredrevcache[filtername]\\n \\n class repoview(object):\\n     \\\"\\\"\\\"Provide a read\\/write view of a repo through a filtered changelog\\n \\n     This object is used to access a filtered version of a repository without\\n     altering the original repository object itself. We can not alter the\\n     original object for two main reasons:\\n     - It prevents the use of a repo with multiple filters at the same time. In\\n       particular when multiple threads are involved.\\n     - It makes scope of the filtering harder to control.\\n \\n     This object behaves very closely to the original repository. All attribute\\n     operations are done on the original repository:\\n     - An access to `repoview.someattr` actually returns `repo.someattr`,\\n     - A write to `repoview.someattr` actually sets value of `repo.someattr`,\\n     - A deletion of `repoview.someattr` actually drops `someattr`\\n       from `repo.__dict__`.\\n \\n     The only exception is the `changelog` property. It is overridden to return\\n     a (surface) copy of `repo.changelog` with some revisions filtered. The\\n     `filtername` attribute of the view control the revisions that need to be\\n     filtered.  (the fact the changelog is copied is an implementation detail).\\n \\n     Unlike attributes, this object intercepts all method calls. This means that\\n     all methods are run on the `repoview` object with the filtered `changelog`\\n     property. For this purpose the simple `repoview` class must be mixed with\\n     the actual class of the repository. This ensures that the resulting\\n     `repoview` object have the very same methods than the repo object. This\\n     leads to the property below.\\n \\n         repoview.method() --\\u003e repo.__class__.method(repoview)\\n \\n     The inheritance has to be done dynamically because `repo` can be of any\\n     subclasses of `localrepo`. 
Eg: `bundlerepo` or `statichttprepo`.\\n     \\\"\\\"\\\"\\n \\n+    # hidden revs which should be visible\\n+    _visibilityexceptions = set()\\n+\\n     def __init__(self, repo, filtername):\\n         object.__setattr__(self, r'_unfilteredrepo', repo)\\n         object.__setattr__(self, r'filtername', filtername)\\n         object.__setattr__(self, r'_clcachekey', None)\\n         object.__setattr__(self, r'_clcache', None)\\n \\n     # not a propertycache on purpose we shall implement a proper cache later\\n     @property\\n     def changelog(self):\\n         \\\"\\\"\\\"return a filtered version of the changeset\\n \\n         this changelog must not be used for writing\\\"\\\"\\\"\\n         # some cache may be implemented later\\n         unfi = self._unfilteredrepo\\n         unfichangelog = unfi.changelog\\n         # bypass call to changelog.method\\n         unfiindex = unfichangelog.index\\n         unfilen = len(unfiindex) - 1\\n         unfinode = unfiindex[unfilen - 1][7]\\n \\n         revs = filterrevs(unfi, self.filtername)\\n         cl = self._clcache\\n         newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)\\n         # if cl.index is not unfiindex, unfi.changelog would be\\n         # recreated, and our clcache refers to garbage object\\n         if (cl is not None and\\n             (cl.index is not unfiindex or newkey != self._clcachekey)):\\n             cl = None\\n         # could have been made None by the previous if\\n         if cl is None:\\n             cl = copy.copy(unfichangelog)\\n             cl.filteredrevs = revs\\n             object.__setattr__(self, r'_clcache', cl)\\n             object.__setattr__(self, r'_clcachekey', newkey)\\n         return cl\\n \\n     def unfiltered(self):\\n         \\\"\\\"\\\"Return an unfiltered version of a repo\\\"\\\"\\\"\\n         return self._unfilteredrepo\\n \\n     def filtered(self, name):\\n         \\\"\\\"\\\"Return a filtered version of a repository\\\"\\\"\\\"\\n         if name == self.filtername:\\n             return self\\n         return self.unfiltered().filtered(name)\\n \\n+    def addvisibilityexceptions(self, revs):\\n+        \\\"\\\"\\\"adds hidden revs which should be visible to set of exceptions\\\"\\\"\\\"\\n+        self._visibilityexceptions.update(revs)\\n+\\n+    def getvisibilityexceptions(self):\\n+        \\\"\\\"\\\"returns the set of hidden revs which should be visible\\\"\\\"\\\"\\n+        return self._visibilityexceptions\\n+\\n     # everything access are forwarded to the proxied repo\\n     def __getattr__(self, attr):\\n         return getattr(self._unfilteredrepo, attr)\\n \\n     def __setattr__(self, attr, value):\\n         return setattr(self._unfilteredrepo, attr, value)\\n \\n     def __delattr__(self, attr):\\n         return delattr(self._unfilteredrepo, attr)\\n\"}]},{\"id\":\"9310\",\"metadata\":{\"line:first\":573},\"oldPath\":\"mercurial\\/localrepo.py\",\"currentPath\":\"mercurial\\/localrepo.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"8\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"2301\",\"newLength\":\"2309\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" # localrepo.py - read\\/write repository class for mercurial\\n #\\n # Copyright 2005-2007 Matt Mackall \\u003cmpm@selenic.com\\u003e\\n #\\n # This software may be used and distributed according 
to the terms of the\\n # GNU General Public License version 2 or any later version.\\n \\n from __future__ import absolute_import\\n \\n import errno\\n import hashlib\\n import inspect\\n import os\\n import random\\n import time\\n import weakref\\n \\n from .i18n import _\\n from .node import (\\n     hex,\\n     nullid,\\n     short,\\n )\\n from . import (\\n     bookmarks,\\n     branchmap,\\n     bundle2,\\n     changegroup,\\n     changelog,\\n     color,\\n     context,\\n     dirstate,\\n     dirstateguard,\\n     discovery,\\n     encoding,\\n     error,\\n     exchange,\\n     extensions,\\n     filelog,\\n     hook,\\n     lock as lockmod,\\n     manifest,\\n     match as matchmod,\\n     merge as mergemod,\\n     mergeutil,\\n     namespaces,\\n     obsolete,\\n     pathutil,\\n     peer,\\n     phases,\\n     pushkey,\\n     pycompat,\\n     repository,\\n     repoview,\\n     revset,\\n     revsetlang,\\n     scmutil,\\n     sparse,\\n     store,\\n     subrepo,\\n     tags as tagsmod,\\n     transaction,\\n     txnutil,\\n     util,\\n     vfs as vfsmod,\\n )\\n \\n release = lockmod.release\\n urlerr = util.urlerr\\n urlreq = util.urlreq\\n \\n # set of (path, vfs-location) tuples. vfs-location is:\\n # - 'plain for vfs relative paths\\n # - '' for svfs relative paths\\n _cachedfiles = set()\\n \\n class _basefilecache(scmutil.filecache):\\n     \\\"\\\"\\\"All filecache usage on repo are done for logic that should be unfiltered\\n     \\\"\\\"\\\"\\n     def __get__(self, repo, type=None):\\n         if repo is None:\\n             return self\\n         return super(_basefilecache, self).__get__(repo.unfiltered(), type)\\n     def __set__(self, repo, value):\\n         return super(_basefilecache, self).__set__(repo.unfiltered(), value)\\n     def __delete__(self, repo):\\n         return super(_basefilecache, self).__delete__(repo.unfiltered())\\n \\n class repofilecache(_basefilecache):\\n     \\\"\\\"\\\"filecache for files in .hg but outside of .hg\\/store\\\"\\\"\\\"\\n     def __init__(self, *paths):\\n         super(repofilecache, self).__init__(*paths)\\n         for path in paths:\\n             _cachedfiles.add((path, 'plain'))\\n \\n     def join(self, obj, fname):\\n         return obj.vfs.join(fname)\\n \\n class storecache(_basefilecache):\\n     \\\"\\\"\\\"filecache for files in the store\\\"\\\"\\\"\\n     def __init__(self, *paths):\\n         super(storecache, self).__init__(*paths)\\n         for path in paths:\\n             _cachedfiles.add((path, ''))\\n \\n     def join(self, obj, fname):\\n         return obj.sjoin(fname)\\n \\n def isfilecached(repo, name):\\n     \\\"\\\"\\\"check if a repo has already cached \\\"name\\\" filecache-ed property\\n \\n     This returns (cachedobj-or-None, iscached) tuple.\\n     \\\"\\\"\\\"\\n     cacheentry = repo.unfiltered()._filecache.get(name, None)\\n     if not cacheentry:\\n         return None, False\\n     return cacheentry.obj, True\\n \\n class unfilteredpropertycache(util.propertycache):\\n     \\\"\\\"\\\"propertycache that apply to unfiltered repo only\\\"\\\"\\\"\\n \\n     def __get__(self, repo, type=None):\\n         unfi = repo.unfiltered()\\n         if unfi is repo:\\n             return super(unfilteredpropertycache, self).__get__(unfi)\\n         return getattr(unfi, self.name)\\n \\n class filteredpropertycache(util.propertycache):\\n     \\\"\\\"\\\"propertycache that must take filtering in account\\\"\\\"\\\"\\n \\n     def cachevalue(self, obj, value):\\n         
object.__setattr__(obj, self.name, value)\\n \\n \\n def hasunfilteredcache(repo, name):\\n     \\\"\\\"\\\"check if a repo has an unfilteredpropertycache value for \\u003cname\\u003e\\\"\\\"\\\"\\n     return name in vars(repo.unfiltered())\\n \\n def unfilteredmethod(orig):\\n     \\\"\\\"\\\"decorate method that always need to be run on unfiltered version\\\"\\\"\\\"\\n     def wrapper(repo, *args, **kwargs):\\n         return orig(repo.unfiltered(), *args, **kwargs)\\n     return wrapper\\n \\n moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',\\n               'unbundle'}\\n legacycaps = moderncaps.union({'changegroupsubset'})\\n \\n class localpeer(repository.peer):\\n     '''peer for a local repo; reflects only the most recent API'''\\n \\n     def __init__(self, repo, caps=None):\\n         super(localpeer, self).__init__()\\n \\n         if caps is None:\\n             caps = moderncaps.copy()\\n         self._repo = repo.filtered('served')\\n         self._ui = repo.ui\\n         self._caps = repo._restrictcapabilities(caps)\\n \\n     # Begin of _basepeer interface.\\n \\n     @util.propertycache\\n     def ui(self):\\n         return self._ui\\n \\n     def url(self):\\n         return self._repo.url()\\n \\n     def local(self):\\n         return self._repo\\n \\n     def peer(self):\\n         return self\\n \\n     def canpush(self):\\n         return True\\n \\n     def close(self):\\n         self._repo.close()\\n \\n     # End of _basepeer interface.\\n \\n     # Begin of _basewirecommands interface.\\n \\n     def branchmap(self):\\n         return self._repo.branchmap()\\n \\n     def capabilities(self):\\n         return self._caps\\n \\n     def debugwireargs(self, one, two, three=None, four=None, five=None):\\n         \\\"\\\"\\\"Used to test argument passing over the wire\\\"\\\"\\\"\\n         return \\\"%s %s %s %s %s\\\" % (one, two, three, four, five)\\n \\n     def getbundle(self, source, heads=None, common=None, bundlecaps=None,\\n                   **kwargs):\\n         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,\\n                                           common=common, bundlecaps=bundlecaps,\\n                                           **kwargs)\\n         cb = util.chunkbuffer(chunks)\\n \\n         if exchange.bundle2requested(bundlecaps):\\n             # When requesting a bundle2, getbundle returns a stream to make the\\n             # wire level function happier. 
We need to build a proper object\\n             # from it in local peer.\\n             return bundle2.getunbundler(self.ui, cb)\\n         else:\\n             return changegroup.getunbundler('01', cb, None)\\n \\n     def heads(self):\\n         return self._repo.heads()\\n \\n     def known(self, nodes):\\n         return self._repo.known(nodes)\\n \\n     def listkeys(self, namespace):\\n         return self._repo.listkeys(namespace)\\n \\n     def lookup(self, key):\\n         return self._repo.lookup(key)\\n \\n     def pushkey(self, namespace, key, old, new):\\n         return self._repo.pushkey(namespace, key, old, new)\\n \\n     def stream_out(self):\\n         raise error.Abort(_('cannot perform stream clone against local '\\n                             'peer'))\\n \\n     def unbundle(self, cg, heads, url):\\n         \\\"\\\"\\\"apply a bundle on a repo\\n \\n         This function handles the repo locking itself.\\\"\\\"\\\"\\n         try:\\n             try:\\n                 cg = exchange.readbundle(self.ui, cg, None)\\n                 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)\\n                 if util.safehasattr(ret, 'getchunks'):\\n                     # This is a bundle20 object, turn it into an unbundler.\\n                     # This little dance should be dropped eventually when the\\n                     # API is finally improved.\\n                     stream = util.chunkbuffer(ret.getchunks())\\n                     ret = bundle2.getunbundler(self.ui, stream)\\n                 return ret\\n             except Exception as exc:\\n                 # If the exception contains output salvaged from a bundle2\\n                 # reply, we need to make sure it is printed before continuing\\n                 # to fail. 
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'), str(exc))
 
     # End of _basewirecommands interface.
 
     # Begin of peer interface.
 
     def iterbatch(self):
         return peer.localiterbatcher(self)
 
     # End of peer interface.
 
 class locallegacypeer(repository.legacypeer, localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)
 
     # Begin of baselegacywirecommands interface.
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def changegroup(self, basenodes, source):
         outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                       missingheads=self._repo.heads())
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                       missingheads=heads)
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     # End of baselegacywirecommands interface.
 
 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
 
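Since ``localpeer`` merely wraps a repository object, the wire-protocol surface above can be exercised entirely in-process. A minimal sketch, assuming a repository opened through ``mercurial.hg`` (the ``path`` value is illustrative):

    from mercurial import hg, ui as uimod

    myui = uimod.ui.load()                 # ui with user configuration loaded
    repo = hg.repository(myui, path='.')   # open an existing local repository
    peer = repo.peer()                     # a localpeer over the 'served' view

    caps = peer.capabilities()             # restricted copy of moderncaps
    tip = peer.lookup('tip')               # resolves through the filtered repo
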
 class localrepository(object):
 
     supportedformats = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
         'manifestv2',
         REVLOGV2_REQUIREMENT,
     }
     _basesupported = supportedformats | {
         'store',
         'fncache',
         'shared',
         'relshared',
         'dotencode',
         'exp-sparse',
     }
     openerreqs = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
         'manifestv2',
     }
 
     # a list of (ui, featureset) functions.
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()
 
     # list of prefixes for files which can be written without 'wlock'
     # Extensions should extend this list when needed
     _wlockfreeprefix = {
         # We might consider requiring 'wlock' for the next
         # two, but pretty much all the existing code assumes
         # wlock is not needed so we keep them excluded for
         # now.
         'hgrc',
         'requires',
         # XXX cache is a complicated business someone
         # should investigate this in depth at some point
         'cache/',
         # XXX shouldn't dirstate be covered by the wlock?
         'dirstate',
         # XXX bisect was still a bit too messy at the time
         # this changeset was introduced. Someone should fix
         # the remaining bit and drop this line
         'bisect.state',
     }
 
     def __init__(self, baseui, path, create=False):
         self.requirements = set()
         self.filtername = None
         # wvfs: rooted at the repository root, used to access the working copy
         self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
         # vfs: rooted at .hg, used to access repo files outside of .hg/store
         self.vfs = None
         # svfs: usually rooted at .hg/store, used to access repository history
         # If this is a shared repository, this vfs may point to another
         # repository's .hg/store directory.
         self.svfs = None
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         # This is only used by context.workingctx.match in order to
         # detect files in subrepos.
         self.auditor = pathutil.pathauditor(
             self.root, callback=self._checknested)
         # This is only used by context.basectx.match in order to detect
         # files in subrepos.
         self.nofsauditor = pathutil.pathauditor(
             self.root, callback=self._checknested, realfs=False, cached=True)
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
         # A list of callbacks to shape the phase if no data were found.
         # Callbacks are in the form: func(repo, roots) --> processed root.
         # This list is to be filled by extensions during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.vfs.join("hgrc"), self.root)
             self._loadextensions()
         except IOError:
             pass
 
         if self.featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in self.featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
         color.setup(self.ui)
 
         # Add compression engines.
         for name in util.compengines:
             engine = util.compengines[name]
             if engine.revlogheader():
                 self.supported.add('exp-compression-%s' % name)
 
         if not self.vfs.isdir():
             if create:
                 self.requirements = newreporequirements(self)
 
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
 
                 if 'store' in self.requirements:
                     self.vfs.mkdir("store")
 
                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 self.requirements = scmutil.readrequires(
                         self.vfs, self.supported)
             except IOError as inst:
                 if inst.errno != errno.ENOENT:
                     raise
 
         cachepath = self.vfs.join('cache')
         self.sharedpath = self.path
         try:
             sharedpath = self.vfs.read("sharedpath").rstrip('\n')
             if 'relshared' in self.requirements:
                 sharedpath = self.vfs.join(sharedpath)
             vfs = vfsmod.vfs(sharedpath, realpath=True)
             cachepath = vfs.join('cache')
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         if 'exp-sparse' in self.requirements and not sparse.enabled:
             raise error.RepoError(_('repository is using sparse feature but '
                                     'sparse is not enabled; enable the '
                                     '"sparse" extension to access'))
 
         self.store = store.store(
             self.requirements, self.sharedpath,
             lambda base: vfsmod.vfs(base, cacheaudited=True))
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
         self.cachevfs.createmode = self.store.createmode
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
             else: # standard vfs
                 self.svfs.audit = self._getsvfsward(self.svfs.audit)
         self._applyopenerreqs()
         if create:
             self._writerequirements()
 
         self._dirstatevalidatewarned = False
 
         self._branchcaches = {}
         self._revbranchcache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
         # hold sets of revisions to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}
 
         # post-dirstate-status hooks
         self._postdsstatus = []
 
         # Cache of types representing filtered repos.
         self._filteredrepotypes = weakref.WeakKeyDictionary()
 
         # generic mapping between names and nodes
         self.names = namespaces.namespaces()
 
         # Key to signature value.
         self._sparsesignaturecache = {}
         # Signature to cached matcher instance.
         self._sparsematchercache = {}
 
     def _getvfsward(self, origfunc):
         """build a ward for self.vfs"""
         rref = weakref.ref(self)
         def checkvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
             if (repo is None
                 or not util.safehasattr(repo, '_wlockref')
                 or not util.safehasattr(repo, '_lockref')):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.path):
                 # truncate name relative to the repository (.hg)
                 path = path[len(repo.path) + 1:]
             if path.startswith('cache/'):
                 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
             if path.startswith('journal.'):
                 # journal is covered by 'lock'
                 if repo._currentlock(repo._lockref) is None:
                     repo.ui.develwarn('write with no lock: "%s"' % path,
                                       stacklevel=2, config='check-locks')
             elif repo._currentlock(repo._wlockref) is None:
                 # rest of vfs files are covered by 'wlock'
                 #
                 # exclude special files
                 for prefix in self._wlockfreeprefix:
                     if path.startswith(prefix):
                         return
                 repo.ui.develwarn('write with no wlock: "%s"' % path,
                                   stacklevel=2, config='check-locks')
             return ret
         return checkvfs
 
     def _getsvfsward(self, origfunc):
         """build a ward for self.svfs"""
         rref = weakref.ref(self)
         def checksvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
             if repo is None or not util.safehasattr(repo, '_lockref'):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.sharedpath):
                 # truncate name relative to the repository (.hg)
                 path = path[len(repo.sharedpath) + 1:]
             if repo._currentlock(repo._lockref) is None:
                 repo.ui.develwarn('write with no lock: "%s"' % path,
                                   stacklevel=3)
             return ret
         return checksvfs
 
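The two ward builders above hold only a weak reference to the repository, so the wrapper closure cannot keep the repository alive or form a reference cycle. A stripped-down sketch of the same pattern, with generic, hypothetical names (``makeward``, ``warn``):

    import weakref

    def makeward(owner, origfunc, warn):
        ownerref = weakref.ref(owner)       # weak ref: no cycle, no leak
        def checked(path, mode=None):
            ret = origfunc(path, mode=mode)
            if ownerref() is not None and mode not in (None, 'r', 'rb'):
                warn('write access: %s' % path)  # stand-in for ui.develwarn
            return ret
        return checked
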
     def close(self):
         self._writecaches()
 
+    def addvisibilityexceptions(self, exceptions):
+        # should be called on a filtered repository
+        pass
+
+    def getvisibilityexceptions(self):
+        # should be called on a filtered repository
+        return set()
+
     def _loadextensions(self):
         extensions.loadall(self.ui)
 
     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()
 
     def _restrictcapabilities(self, caps):
         if self.ui.configbool('experimental', 'bundle2-advertise'):
             caps = set(caps)
             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
             caps.add('bundle2=' + urlreq.quote(capsblob))
         return caps
 
     def _applyopenerreqs(self):
         self.svfs.options = dict((r, 1) for r in self.requirements
                                            if r in self.openerreqs)
         # experimental config: format.chunkcachesize
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.svfs.options['chunkcachesize'] = chunkcachesize
         # experimental config: format.maxchainlen
         maxchainlen = self.ui.configint('format', 'maxchainlen')
         if maxchainlen is not None:
             self.svfs.options['maxchainlen'] = maxchainlen
         # experimental config: format.manifestcachesize
         manifestcachesize = self.ui.configint('format', 'manifestcachesize')
         if manifestcachesize is not None:
             self.svfs.options['manifestcachesize'] = manifestcachesize
         # experimental config: format.aggressivemergedeltas
         aggressivemergedeltas = self.ui.configbool('format',
                                                    'aggressivemergedeltas')
         self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
         self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
         chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
         if 0 <= chainspan:
             self.svfs.options['maxdeltachainspan'] = chainspan
         mmapindexthreshold = self.ui.configbytes('experimental',
                                                  'mmapindexthreshold')
         if mmapindexthreshold is not None:
             self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
         withsparseread = self.ui.configbool('experimental', 'sparse-read')
         srdensitythres = float(self.ui.config('experimental',
                                               'sparse-read.density-threshold'))
         srmingapsize = self.ui.configbytes('experimental',
                                            'sparse-read.min-gap-size')
         self.svfs.options['with-sparse-read'] = withsparseread
         self.svfs.options['sparse-read-density-threshold'] = srdensitythres
         self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
 
         for r in self.requirements:
             if r.startswith('exp-compression-'):
                 self.svfs.options['compengine'] = r[len('exp-compression-'):]
 
         # TODO move "revlogv2" to openerreqs once finalized.
         if REVLOGV2_REQUIREMENT in self.requirements:
             self.svfs.options['revlogv2'] = True
 
     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)
 
         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False
 
     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle
 
     def unfiltered(self):
         """Return unfiltered version of the repository
 
         Intended to be overwritten by filtered repo."""
         return self
 
     def filtered(self, name):
         """Return a filtered version of a repository"""
         # Python <3.4 easily leaks types via __mro__. See
         # https://bugs.python.org/issue17950. We cache dynamically
         # created types so this method doesn't leak on every
         # invocation.
 
         key = self.unfiltered().__class__
         if key not in self._filteredrepotypes:
             # Build a new type with the repoview mixin and the base
             # class of this repo. Give it a name containing the
             # filter name to aid debugging.
             bases = (repoview.repoview, key)
             cls = type(r'%sfilteredrepo' % name, bases, {})
             self._filteredrepotypes[key] = cls
 
         return self._filteredrepotypes[key](self, name)
 
     @repofilecache('bookmarks', 'bookmarks.current')
     def _bookmarks(self):
         return bookmarks.bmstore(self)
 
     @property
     def _activebookmark(self):
         return self._bookmarks.active
 
     # _phaserevs and _phasesets depend on changelog. what we need is to
     # call _phasecache.invalidate() if '00changelog.i' was changed, but it
     # can't be easily expressed in filecache mechanism.
     @storecache('phaseroots', '00changelog.i')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)
 
     @storecache('obsstore')
     def obsstore(self):
         return obsolete.makestore(self.ui, self)
 
     @storecache('00changelog.i')
     def changelog(self):
         return changelog.changelog(self.svfs,
                                    trypending=txnutil.mayhavepending(self.root))
 
     def _constructmanifest(self):
         # This is a temporary function while we migrate from manifest to
         # manifestlog. It allows bundlerepo and unionrepo to intercept the
         # manifest creation.
         return manifest.manifestrevlog(self.svfs)
 
     @storecache('00manifest.i')
     def manifestlog(self):
         return manifest.manifestlog(self.svfs, self)
 
     @repofilecache('dirstate')
     def dirstate(self):
         sparsematchfn = lambda: sparse.matcher(self)
 
         return dirstate.dirstate(self.vfs, self.ui, self.root,
                                  self._dirstatevalidate, sparsematchfn)
 
     def _dirstatevalidate(self, node):
         try:
             self.changelog.rev(node)
             return node
         except error.LookupError:
             if not self._dirstatevalidatewarned:
                 self._dirstatevalidatewarned = True
                 self.ui.warn(_("warning: ignoring unknown"
                                " working parent %s!\n") % short(node))
             return nullid
 
     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         if isinstance(changeid, slice):
             # wdirrev isn't contiguous so the slice shouldn't include it
             return [context.changectx(self, i)
                     for i in xrange(*changeid.indices(len(self)))
                     if i not in self.changelog.filteredrevs]
         try:
             return context.changectx(self, changeid)
         except error.WdirUnsupported:
             return context.workingctx(self)
 
     def __contains__(self, changeid):
         """True if the given changeid exists
 
         error.LookupError is raised if an ambiguous node is specified.
         """
         try:
             self[changeid]
             return True
         except error.RepoLookupError:
             return False
 
     def __nonzero__(self):
         return True
 
     __bool__ = __nonzero__
 
     def __len__(self):
         return len(self.changelog)
 
     def __iter__(self):
         return iter(self.changelog)
 
     def revs(self, expr, *args):
         '''Find revisions matching a revset.
 
         The revset is specified as a string ``expr`` that may contain
         %-formatting to escape certain types. See ``revsetlang.formatspec``.
 
         Revset aliases from the configuration are not expanded. To expand
         user aliases, consider calling ``scmutil.revrange()`` or
         ``repo.anyrevs([expr], user=True)``.
 
         Returns a revset.abstractsmartset, which is a list-like interface
         that contains integer revisions.
         '''
         expr = revsetlang.formatspec(expr, *args)
         m = revset.match(None, expr)
         return m(self)
 
     def set(self, expr, *args):
         '''Find revisions matching a revset and emit changectx instances.
 
         This is a convenience wrapper around ``revs()`` that iterates the
         result and is a generator of changectx instances.
 
         Revset aliases from the configuration are not expanded. To expand
         user aliases, consider calling ``scmutil.revrange()``.
         '''
         for r in self.revs(expr, *args):
             yield self[r]
 
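Because ``revs()`` runs its arguments through ``revsetlang.formatspec``, callers can splice values into a revset without quoting them by hand. A short usage sketch, assuming an open ``repo`` (the revsets themselves are only examples):

    # %d escapes an integer revision, %s a string
    for rev in repo.revs('ancestors(%d) and not public()', 42):
        print(rev)

    # set() yields changectx objects rather than integers
    for ctx in repo.set('branch(%s)', 'default'):
        print(ctx.hex())
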
     def anyrevs(self, specs, user=False, localalias=None):
         '''Find revisions matching one of the given revsets.
 
         Revset aliases from the configuration are not expanded by default. To
         expand user aliases, specify ``user=True``. To provide some local
         definitions overriding user aliases, set ``localalias`` to
         ``{name: definitionstring}``.
         '''
         if user:
             m = revset.matchany(self.ui, specs, repo=self,
                                 localalias=localalias)
         else:
             m = revset.matchany(None, specs, localalias=localalias)
         return m(self)
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         """Call a hook, passing this repo instance.
 
         This is a convenience method to aid invoking hooks. Extensions likely
         won't call this unless they have registered a custom hook or are
         replacing code that is expected to call a hook.
         """
         return hook.hook(self.ui, self, name, throw, **args)
 
     @filteredpropertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''
 
         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None
 
                 self.nodetagscache = self.tagslist = None
 
         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()
 
         return cache
 
     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
         if self.changelog.filteredrevs:
             tags, tt = self._findtags()
         else:
             tags = self._tagscache.tags
         for k, v in tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t
 
     def _findtags(self):
         '''Do the hard work of finding tags.  Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''
 
         # XXX what tagtype should subclasses/extensions use?  Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type?  Should there
         # be one tagtype for all such "virtual" tags?  Or is the status
         # quo fine?
 
 
         # map tag name to (node, hist)
         alltags = tagsmod.findglobaltags(self.ui, self)
         # map tag name to tag type
         tagtypes = dict((tag, 'global') for tag in alltags)
 
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
         # Build the return dicts.  Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)
 
     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
 
         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''
 
         return self._tagscache.tagtypes.get(tagname)
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 l.append((self.changelog.rev(n), t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
 
         return self._tagscache.tagslist
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])
 
     def nodebookmarks(self, node):
         """return the list of bookmarks pointing to the specified node"""
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)
 
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]} with branchheads
         ordered by increasing revision number'''
         branchmap.updatecache(self)
         return self._branchcaches[self.filtername]
 
     @unfilteredmethod
     def revbranchcache(self):
         if not self._revbranchcache:
             self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
         return self._revbranchcache
 
     def branchtip(self, branch, ignoremissing=False):
         '''return the tip node for a given branch
 
         If ignoremissing is True, then this method will not raise an error.
         This is helpful for callers that only expect None for a missing branch
         (e.g. namespace).
 
         '''
         try:
             return self.branchmap().branchtip(branch)
         except KeyError:
             if not ignoremissing:
                 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
             else:
                 pass
 
     def lookup(self, key):
         return self[key].node()
 
     def lookupbranch(self, key, remote=None):
         repo = remote or self
         if key in repo.branchmap():
             return key
 
         repo = (remote and remote.local()) and remote or self
         return repo[key].branch()
 
     def known(self, nodes):
         cl = self.changelog
         nm = cl.nodemap
         filtered = cl.filteredrevs
         result = []
         for n in nodes:
             r = nm.get(n)
             resp = not (r is None or r in filtered)
             result.append(resp)
         return result
 
     def local(self):
         return self
 
     def publishing(self):
         # it's safe (and desirable) to trust the publish flag unconditionally
         # so that we don't finalize changes shared between users via ssh or nfs
         return self.ui.configbool('phases', 'publish', untrusted=True)
 
     def cancopy(self):
         # so statichttprepo's override of local() works
         if not self.local():
             return False
         if not self.publishing():
             return True
         # if publishing we can't copy if there is filtered content
         return not self.filtered('visible').changelog.filteredrevs
 
     def shared(self):
         '''the type of shared repository (None if not shared)'''
         if self.sharedpath != self.path:
             return 'store'
         return None
 
     def wjoin(self, f, *insidef):
         return self.vfs.reljoin(self.root, f, *insidef)
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.svfs, f)
 
     def changectx(self, changeid):
         return self[changeid]
 
     def setparents(self, p1, p2=nullid):
         with self.dirstate.parentchange():
             copies = self.dirstate.setparents(p1, p2)
             pctx = self[p1]
             if copies:
                 # Adjust copy records, the dirstate cannot do it, it
                 # requires access to parents manifests. Preserve them
                 # only for entries added to first parent.
                 for f in copies:
                     if f not in pctx and copies[f] in pctx:
                         self.dirstate.copy(copies[f], f)
             if p2 == nullid:
                 for f, s in sorted(self.dirstate.copies().items()):
                     if f not in pctx and s not in pctx:
                         self.dirstate.copy(None, f)
 
     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
            fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)
 
     def getcwd(self):
         return self.dirstate.getcwd()
 
     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)
 
     def _loadfilter(self, filter):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = matchmod.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l
         return self.filterpats[filter]
 
     def _filter(self, filterpats, filename, data):
         for mf, fn, cmd in filterpats:
             if mf(filename):
                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break
 
         return data
 
     @unfilteredpropertycache
     def _encodefilterpats(self):
         return self._loadfilter('encode')
 
     @unfilteredpropertycache
     def _decodefilterpats(self):
         return self._loadfilter('decode')
 
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
 
     def wread(self, filename):
         if self.wvfs.islink(filename):
             data = self.wvfs.readlink(filename)
         else:
             data = self.wvfs.read(filename)
         return self._filter(self._encodefilterpats, filename, data)
 
     def wwrite(self, filename, data, flags, backgroundclose=False):
         """write ``data`` into ``filename`` in the working directory
 
         This returns length of written (maybe decoded) data.
         """
         data = self._filter(self._decodefilterpats, filename, data)
         if 'l' in flags:
             self.wvfs.symlink(data, filename)
         else:
             self.wvfs.write(filename, data, backgroundclose=backgroundclose)
             if 'x' in flags:
                 self.wvfs.setflags(filename, False, True)
         return len(data)
 
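Filters registered with ``adddatafilter()`` are selected in ``_loadfilter()`` by prefix-matching the command configured for a pattern, and are invoked with the keyword arguments visible in ``_filter()``. A hedged sketch (the ``upper:`` filter name and its body are purely illustrative):

    def upperfilter(s, cmd, ui=None, repo=None, filename=None):
        # toy data filter: upper-case file contents on read
        return s.upper()

    repo.adddatafilter('upper:', upperfilter)
    # with a matching pattern such as '**.txt = upper:' in the [encode]
    # section, wread() would now return upper-cased data for those files
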
     def wwritedata(self, filename, data):
         return self._filter(self._decodefilterpats, filename, data)
 
     def currenttransaction(self):
         """return the current transaction or None if none exists"""
         if self._transref:
             tr = self._transref()
         else:
             tr = None
 
         if tr and tr.running():
             return tr
         return None
 
     def transaction(self, desc, report=None):
         if (self.ui.configbool('devel', 'all-warnings')
                 or self.ui.configbool('devel', 'check-locks')):
             if self._currentlock(self._lockref) is None:
                 raise error.ProgrammingError('transaction requires locking')
         tr = self.currenttransaction()
         if tr is not None:
             scmutil.registersummarycallback(self, tr, desc)
             return tr.nest()
 
         # abort here if the journal already exists
         if self.svfs.exists("journal"):
             raise error.RepoError(
                 _("abandoned transaction found"),
                 hint=_("run 'hg recover' to clean up transaction"))
 
         idbase = "%.40f#%f" % (random.random(), time.time())
         ha = hex(hashlib.sha1(idbase).digest())
         txnid = 'TXN:' + ha
         self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
 
         self._writejournal(desc)
         renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
         if report:
             rp = report
         else:
             rp = self.ui.warn
         vfsmap = {'plain': self.vfs} # root of .hg/
         # we must avoid cyclic reference between repo and transaction.
         reporef = weakref.ref(self)
         # Code to track tag movement
         #
         # Since tags are all handled as file content, it is actually quite hard
         # to track these movements from a code perspective. So we fall back to
         # tracking at the repository level. One could envision tracking changes
         # to the '.hgtags' file through changegroup apply but that fails to
         # cope with cases where a transaction exposes new heads without a
         # changegroup being involved (eg: phase movement).
         #
         # For now, we gate the feature behind a flag since this likely comes
         # with performance impacts. The current code runs more often than
         # needed and does not use caches as much as it could.  The current
         # focus is on the behavior of the feature so we disable it by default.
         # The flag will be removed when we are happy with the performance
         # impact.
         #
         # Once this feature is no longer experimental move the following
         # documentation to the appropriate help section:
         #
         # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
         # tags (new or changed or deleted tags). In addition the details of
         # these changes are made available in a file at:
         #     ``REPOROOT/.hg/changes/tags.changes``.
         # Make sure you check for HG_TAG_MOVED before reading that file as it
         # might exist from a previous transaction even if no tags were touched
         # in this one. Changes are recorded in a line-based format::
         #
         #     <action> <hex-node> <tag-name>\n
         #
         # Actions are defined as follows:
         #   "-R": tag is removed,
         #   "+A": tag is added,
         #   "-M": tag is moved (old value),
         #   "+M": tag is moved (new value),
         tracktags = lambda x: None
         # experimental config: experimental.hook-track-tags
         shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
         if desc != 'strip' and shouldtracktags:
             oldheads = self.changelog.headrevs()
             def tracktags(tr2):
                 repo = reporef()
                 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                 newheads = repo.changelog.headrevs()
                 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                 # notes: we compare lists here.
                 # As we do it only once, building a set would not be cheaper
                 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                 if changes:
                     tr2.hookargs['tag_moved'] = '1'
                     with repo.vfs('changes/tags.changes', 'w',
                                   atomictemp=True) as changesfile:
                         # note: we do not register the file to the transaction
                         # because we need it to still exist when the
                         # transaction is closed (for txnclose hooks)
                         tagsmod.writediff(changesfile, changes)
         def validate(tr2):
             """will run pre-closing hooks"""
             # XXX the transaction API is a bit lacking here so we take a hacky
             # path for now
             #
             # We cannot add this as a "pending" hook since the 'tr.hookargs'
             # dict is copied before these run. In addition we need the data
             # available to in-memory hooks too.
             #
             # Moreover, we also need to make sure this runs before txnclose
             # hooks and there is no "pending" mechanism that would execute
             # logic only if hooks are about to run.
             #
             # Fixing this limitation of the transaction is also needed to track
             # other families of changes (bookmarks, phases, obsolescence).
             #
             # This will have to be fixed before we remove the experimental
             # gating.
             tracktags(tr2)
             repo = reporef()
             if repo.ui.configbool('experimental', 'single-head-per-branch'):
                 scmutil.enforcesinglehead(repo, tr2, desc)
             if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                     args = tr.hookargs.copy()
                     args.update(bookmarks.preparehookargs(name, old, new))
                     repo.hook('pretxnclose-bookmark', throw=True,
                               txnname=desc,
                               **pycompat.strkwargs(args))
             if hook.hashook(repo.ui, 'pretxnclose-phase'):
                 cl = repo.unfiltered().changelog
                 for rev, (old, new) in tr.changes['phases'].items():
                     args = tr.hookargs.copy()
                     node = hex(cl.node(rev))
                     args.update(phases.preparehookargs(node, old, new))
                     repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                               **pycompat.strkwargs(args))
 
             repo.hook('pretxnclose', throw=True,
                       txnname=desc, **pycompat.strkwargs(tr.hookargs))
         def releasefn(tr, success):
             repo = reporef()
             if success:
                 # this should be explicitly invoked here, because
                 # in-memory changes aren't written out when closing the
                 # transaction if tr.addfilegenerator (via dirstate.write
                 # or so) isn't invoked while the transaction is running
                 repo.dirstate.write(None)
             else:
                 # discard all changes (including ones already written
                 # out) in this transaction
                 repo.dirstate.restorebackup(None, 'journal.dirstate')
 
                 repo.invalidate(clearfilecache=True)
 
         tr = transaction.transaction(rp, self.svfs, vfsmap,
                                      "journal",
                                      "undo",
                                      aftertrans(renames),
                                      self.store.createmode,
                                      validator=validate,
                                      releasefn=releasefn,
                                      checkambigfiles=_cachedfiles)
         tr.changes['revs'] = set()
         tr.changes['obsmarkers'] = set()
         tr.changes['phases'] = {}
         tr.changes['bookmarks'] = {}
 
         tr.hookargs['txnid'] = txnid
         # note: writing the fncache only during finalize means that the file is
         # outdated when running hooks. As fncache is used for streaming clone,
         # this is not expected to break anything that happens during the hooks.
         tr.addfinalize('flush-fncache', self.store.write)
         def txnclosehook(tr2):
             """To be run if transaction is successful, will schedule a hook run
             """
             # Don't reference tr2 in hook() so we don't hold a reference.
             # This reduces memory consumption when there are multiple
             # transactions per lock. This can likely go away if issue5045
             # fixes the function accumulation.
             hookargs = tr2.hookargs
 
             def hookfunc():
                 repo = reporef()
                 if hook.hashook(repo.ui, 'txnclose-bookmark'):
                     bmchanges = sorted(tr.changes['bookmarks'].items())
                     for name, (old, new) in bmchanges:
                         args = tr.hookargs.copy()
                         args.update(bookmarks.preparehookargs(name, old, new))
                         repo.hook('txnclose-bookmark', throw=False,
                                   txnname=desc, **pycompat.strkwargs(args))
 
                 if hook.hashook(repo.ui, 'txnclose-phase'):
                     cl = repo.unfiltered().changelog
                     phasemv = sorted(tr.changes['phases'].items())
                     for rev, (old, new) in phasemv:
                         args = tr.hookargs.copy()
                         node = hex(cl.node(rev))
                         args.update(phases.preparehookargs(node, old, new))
                         repo.hook('txnclose-phase', throw=False, txnname=desc,
                                   **pycompat.strkwargs(args))
 
                 repo.hook('txnclose', throw=False, txnname=desc,
                           **pycompat.strkwargs(hookargs))
             reporef()._afterlock(hookfunc)
         tr.addfinalize('txnclose-hook', txnclosehook)
         tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
         def txnaborthook(tr2):
             """To be run if transaction is aborted
             """
             reporef().hook('txnabort', throw=False, txnname=desc,
                            **tr2.hookargs)
         tr.addabort('txnabort-hook', txnaborthook)
         # avoid eager cache invalidation. in-memory data should be identical
         # to stored data if transaction has no error.
         tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
         self._transref = weakref.ref(tr)
         scmutil.registersummarycallback(self, tr, desc)
         return tr
 
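As the checks at the top of ``transaction()`` above enforce, the store lock must already be held when a transaction is opened, and the returned object must be explicitly closed or released. A minimal usage sketch under those assumptions ('example' is an arbitrary description):

    with repo.lock():                  # transaction requires locking
        tr = repo.transaction('example')
        try:
            # ... perform store writes through transaction-aware code ...
            tr.close()                 # runs validate() and the close hooks
        finally:
            tr.release()               # aborts (txnabort hook) if not closed
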
     def _journalfiles(self):
         return ((self.svfs, 'journal'),
                 (self.vfs, 'journal.dirstate'),
                 (self.vfs, 'journal.branch'),
                 (self.vfs, 'journal.desc'),
                 (self.vfs, 'journal.bookmarks'),
                 (self.svfs, 'journal.phaseroots'))
 
     def undofiles(self):
         return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
 
     @unfilteredmethod
     def _writejournal(self, desc):
         self.dirstate.savebackup(None, 'journal.dirstate')
         self.vfs.write("journal.branch",
                           encoding.fromlocal(self.dirstate.branch()))
         self.vfs.write("journal.desc",
                           "%d\n%s\n" % (len(self), desc))
         self.vfs.write("journal.bookmarks",
                           self.vfs.tryread("bookmarks"))
         self.svfs.write("journal.phaseroots",
                            self.svfs.tryread("phaseroots"))
 
     def recover(self):
         with self.lock():
             if self.svfs.exists("journal"):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 vfsmap = {'': self.svfs,
                           'plain': self.vfs,}
                 transaction.rollback(self.svfs, vfsmap, "journal",
                                      self.ui.warn,
                                      checkambigfiles=_cachedfiles)
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
 
     def rollback(self, dryrun=False, force=False):
         wlock = lock = dsguard = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if self.svfs.exists("undo"):
                 dsguard = dirstateguard.dirstateguard(self, 'rollback')
 
                 return self._rollback(dryrun, force, dsguard)
             else:
                 self.ui.warn(_("no rollback information available\n"))
                 return 1
         finally:
             release(dsguard, lock, wlock)
 
     @unfilteredmethod # Until we get smarter cache management
     def _rollback(self, dryrun, force, dsguard):
         ui = self.ui
         try:
             args = self.vfs.read('undo.desc').splitlines()
             (oldlen, desc, detail) = (int(args[0]), args[1], None)
             if len(args) >= 3:
                 detail = args[2]
             oldtip = oldlen - 1
 
             if detail and ui.verbose:
                 msg = (_('repository tip rolled back to revision %d'
                          ' (undo %s: %s)\n')
                        % (oldtip, desc, detail))
             else:
                 msg = (_('repository tip rolled back to revision %d'
                          ' (undo %s)\n')
                        % (oldtip, desc))
         except IOError:
             msg = _('rolling back unknown transaction\n')
             desc = None
 
         if not force and self['.'] != self['tip'] and desc == 'commit':
             raise error.Abort(
                 _('rollback of last commit while not checked out '
                   'may lose data'), hint=_('use -f to force'))
 
         ui.status(msg)
         if dryrun:
             return 0
 
         parents = self.dirstate.parents()
         self.destroying()
         vfsmap = {'plain': self.vfs, '': self.svfs}
         transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                              checkambigfiles=_cachedfiles)
         if self.vfs.exists('undo.bookmarks'):
             self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
         if self.svfs.exists('undo.phaseroots'):
             self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
         self.invalidate()
 
         parentgone = (parents[0] not in self.changelog.nodemap or
                       parents[1] not in self.changelog.nodemap)
         if parentgone:
             # prevent dirstateguard from overwriting already restored one
             dsguard.close()
 
             self.dirstate.restorebackup(None, 'undo.dirstate')
             try:
                 branch = self.vfs.read('undo.branch')
                 self.dirstate.setbranch(encoding.tolocal(branch))
             except IOError:
                 ui.warn(_('named branch could not be reset: '
                           'current branch is still \'%s\'\n')
                         % self.dirstate.branch())
 
             parents = tuple([p.rev() for p in self[None].parents()])
             if len(parents) > 1:
                 ui.status(_('working directory now based on '
                             'revisions %d and %d\n') % parents)
             else:
                 ui.status(_('working directory now based on '
                             'revision %d\n') % parents)
             mergemod.mergestate.clean(self, self['.'].node())
 
         # TODO: if we know which new heads may result from this rollback, pass
         # them to destroy(), which will prevent the branchhead cache from being
         # invalidated.
         self.destroyed()
         return 0
 
     def _buildcacheupdater(self, newtransaction):
         """called during transaction to build the callback updating cache
 
         Lives on the repository to help extensions that might want to augment
         this logic. For this purpose, the created transaction is passed to the
         method.
         """
         # we must avoid cyclic reference between repo and transaction.
         reporef = weakref.ref(self)
         def updater(tr):
             repo = reporef()
             repo.updatecaches(tr)
         return updater
 
     @unfilteredmethod
     def updatecaches(self, tr=None):
         """warm appropriate caches
 
         If this function is called after a transaction closed, the transaction
         will be available in the 'tr' argument. This can be used to selectively
         update caches relevant to the changes in that transaction.
         """
         if tr is not None and tr.hookargs.get('source') == 'strip':
             # During strip, many caches are invalid but a
             # later call to `destroyed` will refresh them.
             return
 
         if tr is None or tr.changes['revs']:
             # updating the unfiltered branchmap should refresh all the others,
             self.ui.debug('updating the branch cache\n')
             branchmap.updatecache(self.filtered('served'))
 
     def invalidatecaches(self):
 
         if '_tagscache' in vars(self):
             # can't use delattr on proxy
             del self.__dict__['_tagscache']
 
         self.unfiltered()._branchcaches.clear()
         self.invalidatevolatilesets()
         self._sparsesignaturecache.clear()
 
     def invalidatevolatilesets(self):
         self.filteredrevcache.clear()
         obsolete.clearobscaches(self)
 
     def invalidatedirstate(self):
         '''Invalidates the dirstate, causing the next call to dirstate
         to check if it was modified since the last time it was read,
         rereading it if it has.
 
         This is different from dirstate.invalidate() in that it doesn't always
         reread the dirstate. Use dirstate.invalidate() if you want to
         explicitly read the dirstate again (i.e. restoring it to a previous
         known good state).'''
         if hasunfilteredcache(self, 'dirstate'):
             for k in self.dirstate._filecache:
                 try:
                     delattr(self.dirstate, k)
                 except AttributeError:
                     pass
             delattr(self.unfiltered(), 'dirstate')
 
     def invalidate(self, clearfilecache=False):
         '''Invalidates both store and non-store parts other than dirstate
 
         If a transaction is running, invalidation of store is omitted,
         because discarding in-memory changes might cause inconsistency
         (e.g. incomplete fncache causes unintentional failure, but
         redundant one doesn't).
         '''
         unfiltered = self.unfiltered() # all file caches are stored unfiltered
         for k in list(self._filecache.keys()):
             # dirstate is invalidated separately in invalidatedirstate()
             if k == 'dirstate':
                 continue
             if (k == 'changelog' and
                 self.currenttransaction() and
                 self.changelog._delayed):
                 # The changelog object may store unwritten revisions. We don't
                 # want to lose them.
                 # TODO: Solve the problem instead of working around it.
                 continue
 
             if clearfilecache:
                 del self._filecache[k]
             try:
                 delattr(unfiltered, k)
             except AttributeError:
                 pass
         self.invalidatecaches()
         if not self.currenttransaction():
             # TODO: Changing contents of store outside transaction
             # causes inconsistency. We should make in-memory store
We should make in-memory store\\n             # changes detectable, and abort if changed.\\n             self.store.invalidatecaches()\\n \\n     def invalidateall(self):\\n         '''Fully invalidates both store and non-store parts, causing the\\n         subsequent operation to reread any outside changes.'''\\n         # extension should hook this to invalidate its caches\\n         self.invalidate()\\n         self.invalidatedirstate()\\n \\n     @unfilteredmethod\\n     def _refreshfilecachestats(self, tr):\\n         \\\"\\\"\\\"Reload stats of cached files so that they are flagged as valid\\\"\\\"\\\"\\n         for k, ce in self._filecache.items():\\n             if k == 'dirstate' or k not in self.__dict__:\\n                 continue\\n             ce.refresh()\\n \\n     def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,\\n               inheritchecker=None, parentenvvar=None):\\n         parentlock = None\\n         # the contents of parentenvvar are used by the underlying lock to\\n         # determine whether it can be inherited\\n         if parentenvvar is not None:\\n             parentlock = encoding.environ.get(parentenvvar)\\n         try:\\n             l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,\\n                              acquirefn=acquirefn, desc=desc,\\n                              inheritchecker=inheritchecker,\\n                              parentlock=parentlock)\\n         except error.LockHeld as inst:\\n             if not wait:\\n                 raise\\n             # show more details for new-style locks\\n             if ':' in inst.locker:\\n                 host, pid = inst.locker.split(\\\":\\\", 1)\\n                 self.ui.warn(\\n                     _(\\\"waiting for lock on %s held by process %r \\\"\\n                       \\\"on host %r\\\\n\\\") % (desc, pid, host))\\n             else:\\n                 self.ui.warn(_(\\\"waiting for lock on %s held by %r\\\\n\\\") %\\n                              (desc, inst.locker))\\n             # default to 600 seconds timeout\\n             l = lockmod.lock(vfs, lockname,\\n                              int(self.ui.config(\\\"ui\\\", \\\"timeout\\\")),\\n                              releasefn=releasefn, acquirefn=acquirefn,\\n                              desc=desc)\\n             self.ui.warn(_(\\\"got lock after %s seconds\\\\n\\\") % l.delay)\\n         return l\\n \\n     def _afterlock(self, callback):\\n         \\\"\\\"\\\"add a callback to be run when the repository is fully unlocked\\n \\n         The callback will be executed when the outermost lock is released\\n         (with wlock being higher level than 'lock').\\\"\\\"\\\"\\n         for ref in (self._wlockref, self._lockref):\\n             l = ref and ref()\\n             if l and l.held:\\n                 l.postrelease.append(callback)\\n                 break\\n         else: # no lock have been found.\\n             callback()\\n \\n     def lock(self, wait=True):\\n         '''Lock the repository store (.hg\\/store) and return a weak reference\\n         to the lock. Use this before modifying the store (e.g. committing or\\n         stripping). 
If you are opening a transaction, get a lock as well.)\\n \\n         If both 'lock' and 'wlock' must be acquired, ensure you always acquires\\n         'wlock' first to avoid a dead-lock hazard.'''\\n         l = self._currentlock(self._lockref)\\n         if l is not None:\\n             l.lock()\\n             return l\\n \\n         l = self._lock(self.svfs, \\\"lock\\\", wait, None,\\n                        self.invalidate, _('repository %s') % self.origroot)\\n         self._lockref = weakref.ref(l)\\n         return l\\n \\n     def _wlockchecktransaction(self):\\n         if self.currenttransaction() is not None:\\n             raise error.LockInheritanceContractViolation(\\n                 'wlock cannot be inherited in the middle of a transaction')\\n \\n     def wlock(self, wait=True):\\n         '''Lock the non-store parts of the repository (everything under\\n         .hg except .hg\\/store) and return a weak reference to the lock.\\n \\n         Use this before modifying files in .hg.\\n \\n         If both 'lock' and 'wlock' must be acquired, ensure you always acquires\\n         'wlock' first to avoid a dead-lock hazard.'''\\n         l = self._wlockref and self._wlockref()\\n         if l is not None and l.held:\\n             l.lock()\\n             return l\\n \\n         # We do not need to check for non-waiting lock acquisition.  Such\\n         # acquisition would not cause dead-lock as they would just fail.\\n         if wait and (self.ui.configbool('devel', 'all-warnings')\\n                      or self.ui.configbool('devel', 'check-locks')):\\n             if self._currentlock(self._lockref) is not None:\\n                 self.ui.develwarn('\\\"wlock\\\" acquired after \\\"lock\\\"')\\n \\n         def unlock():\\n             if self.dirstate.pendingparentchange():\\n                 self.dirstate.invalidate()\\n             else:\\n                 self.dirstate.write(None)\\n \\n             self._filecache['dirstate'].refresh()\\n \\n         l = self._lock(self.vfs, \\\"wlock\\\", wait, unlock,\\n                        self.invalidatedirstate, _('working directory of %s') %\\n                        self.origroot,\\n                        inheritchecker=self._wlockchecktransaction,\\n                        parentenvvar='HG_WLOCK_LOCKER')\\n         self._wlockref = weakref.ref(l)\\n         return l\\n \\n     def _currentlock(self, lockref):\\n         \\\"\\\"\\\"Returns the lock if it's held, or None if it's not.\\\"\\\"\\\"\\n         if lockref is None:\\n             return None\\n         l = lockref()\\n         if l is None or not l.held:\\n             return None\\n         return l\\n \\n     def currentwlock(self):\\n         \\\"\\\"\\\"Returns the wlock if it's held, or None if it's not.\\\"\\\"\\\"\\n         return self._currentlock(self._wlockref)\\n \\n     def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):\\n         \\\"\\\"\\\"\\n         commit an individual file as part of a larger transaction\\n         \\\"\\\"\\\"\\n \\n         fname = fctx.path()\\n         fparent1 = manifest1.get(fname, nullid)\\n         fparent2 = manifest2.get(fname, nullid)\\n         if isinstance(fctx, context.filectx):\\n             node = fctx.filenode()\\n             if node in [fparent1, fparent2]:\\n                 self.ui.debug('reusing %s filelog entry\\\\n' % fname)\\n                 if manifest1.flags(fname) != fctx.flags():\\n                     changelist.append(fname)\\n                 return node\\n \\n      
   flog = self.file(fname)\\n         meta = {}\\n         copy = fctx.renamed()\\n         if copy and copy[0] != fname:\\n             # Mark the new revision of this file as a copy of another\\n             # file.  This copy data will effectively act as a parent\\n             # of this new revision.  If this is a merge, the first\\n             # parent will be the nullid (meaning \\\"look up the copy data\\\")\\n             # and the second one will be the other parent.  For example:\\n             #\\n             # 0 --- 1 --- 3   rev1 changes file foo\\n             #   \\\\       \\/     rev2 renames foo to bar and changes it\\n             #    \\\\- 2 -\\/      rev3 should have bar with all changes and\\n             #                      should record that bar descends from\\n             #                      bar in rev2 and foo in rev1\\n             #\\n             # this allows this merge to succeed:\\n             #\\n             # 0 --- 1 --- 3   rev4 reverts the content change from rev2\\n             #   \\\\       \\/     merging rev3 and rev4 should use bar@rev2\\n             #    \\\\- 2 --- 4        as the merge base\\n             #\\n \\n             cfname = copy[0]\\n             crev = manifest1.get(cfname)\\n             newfparent = fparent2\\n \\n             if manifest2: # branch merge\\n                 if fparent2 == nullid or crev is None: # copied on remote side\\n                     if cfname in manifest2:\\n                         crev = manifest2[cfname]\\n                         newfparent = fparent1\\n \\n             # Here, we used to search backwards through history to try to find\\n             # where the file copy came from if the source of a copy was not in\\n             # the parent directory. However, this doesn't actually make sense to\\n             # do (what does a copy from something not in your working copy even\\n             # mean?) and it causes bugs (eg, issue4476). 
Instead, we will warn\\n             # the user that copy information was dropped, so if they didn't\\n             # expect this outcome it can be fixed, but this is the correct\\n             # behavior in this circumstance.\\n \\n             if crev:\\n                 self.ui.debug(\\\" %s: copy %s:%s\\\\n\\\" % (fname, cfname, hex(crev)))\\n                 meta[\\\"copy\\\"] = cfname\\n                 meta[\\\"copyrev\\\"] = hex(crev)\\n                 fparent1, fparent2 = nullid, newfparent\\n             else:\\n                 self.ui.warn(_(\\\"warning: can't find ancestor for '%s' \\\"\\n                                \\\"copied from '%s'!\\\\n\\\") % (fname, cfname))\\n \\n         elif fparent1 == nullid:\\n             fparent1, fparent2 = fparent2, nullid\\n         elif fparent2 != nullid:\\n             # is one parent an ancestor of the other?\\n             fparentancestors = flog.commonancestorsheads(fparent1, fparent2)\\n             if fparent1 in fparentancestors:\\n                 fparent1, fparent2 = fparent2, nullid\\n             elif fparent2 in fparentancestors:\\n                 fparent2 = nullid\\n \\n         # is the file changed?\\n         text = fctx.data()\\n         if fparent2 != nullid or flog.cmp(fparent1, text) or meta:\\n             changelist.append(fname)\\n             return flog.add(text, meta, tr, linkrev, fparent1, fparent2)\\n         # are just the flags changed during merge?\\n         elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():\\n             changelist.append(fname)\\n \\n         return fparent1\\n \\n     def checkcommitpatterns(self, wctx, vdirs, match, status, fail):\\n         \\\"\\\"\\\"check for commit arguments that aren't committable\\\"\\\"\\\"\\n         if match.isexact() or match.prefix():\\n             matched = set(status.modified + status.added + status.removed)\\n \\n             for f in match.files():\\n                 f = self.dirstate.normalize(f)\\n                 if f == '.' or f in matched or f in wctx.substate:\\n                     continue\\n                 if f in status.deleted:\\n                     fail(f, _('file not found!'))\\n                 if f in vdirs: # visited directory\\n                     d = f + '\\/'\\n                     for mf in matched:\\n                         if mf.startswith(d):\\n                             break\\n                     else:\\n                         fail(f, _(\\\"no match under directory!\\\"))\\n                 elif f not in self.dirstate:\\n                     fail(f, _(\\\"file not tracked!\\\"))\\n \\n     @unfilteredmethod\\n     def commit(self, text=\\\"\\\", user=None, date=None, match=None, force=False,\\n                editor=False, extra=None):\\n         \\\"\\\"\\\"Add a new revision to current repository.\\n \\n         Revision information is gathered from the working directory,\\n         match can be used to filter the committed files. 
If editor is\\n         supplied, it is called to get a commit message.\\n         \\\"\\\"\\\"\\n         if extra is None:\\n             extra = {}\\n \\n         def fail(f, msg):\\n             raise error.Abort('%s: %s' % (f, msg))\\n \\n         if not match:\\n             match = matchmod.always(self.root, '')\\n \\n         if not force:\\n             vdirs = []\\n             match.explicitdir = vdirs.append\\n             match.bad = fail\\n \\n         wlock = lock = tr = None\\n         try:\\n             wlock = self.wlock()\\n             lock = self.lock() # for recent changelog (see issue4368)\\n \\n             wctx = self[None]\\n             merge = len(wctx.parents()) \\u003e 1\\n \\n             if not force and merge and not match.always():\\n                 raise error.Abort(_('cannot partially commit a merge '\\n                                    '(do not specify files or patterns)'))\\n \\n             status = self.status(match=match, clean=force)\\n             if force:\\n                 status.modified.extend(status.clean) # mq may commit clean files\\n \\n             # check subrepos\\n             subs, commitsubs, newstate = subrepo.precommit(\\n                 self.ui, wctx, status, match, force=force)\\n \\n             # make sure all explicit patterns are matched\\n             if not force:\\n                 self.checkcommitpatterns(wctx, vdirs, match, status, fail)\\n \\n             cctx = context.workingcommitctx(self, status,\\n                                             text, user, date, extra)\\n \\n             # internal config: ui.allowemptycommit\\n             allowemptycommit = (wctx.branch() != wctx.p1().branch()\\n                                 or extra.get('close') or merge or cctx.files()\\n                                 or self.ui.configbool('ui', 'allowemptycommit'))\\n             if not allowemptycommit:\\n                 return None\\n \\n             if merge and cctx.deleted():\\n                 raise error.Abort(_(\\\"cannot commit merge with missing files\\\"))\\n \\n             ms = mergemod.mergestate.read(self)\\n             mergeutil.checkunresolved(ms)\\n \\n             if editor:\\n                 cctx._text = editor(self, cctx, subs)\\n             edited = (text != cctx._text)\\n \\n             # Save commit message in case this transaction gets rolled back\\n             # (e.g. by a pretxncommit hook).  
Leave the content alone on\\n             # the assumption that the user will use the same editor again.\\n             msgfn = self.savecommitmessage(cctx._text)\\n \\n             # commit subs and write new state\\n             if subs:\\n                 for s in sorted(commitsubs):\\n                     sub = wctx.sub(s)\\n                     self.ui.status(_('committing subrepository %s\\\\n') %\\n                         subrepo.subrelpath(sub))\\n                     sr = sub.commit(cctx._text, user, date)\\n                     newstate[s] = (newstate[s][0], sr)\\n                 subrepo.writestate(self, newstate)\\n \\n             p1, p2 = self.dirstate.parents()\\n             hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')\\n             try:\\n                 self.hook(\\\"precommit\\\", throw=True, parent1=hookp1,\\n                           parent2=hookp2)\\n                 tr = self.transaction('commit')\\n                 ret = self.commitctx(cctx, True)\\n             except: # re-raises\\n                 if edited:\\n                     self.ui.write(\\n                         _('note: commit message saved in %s\\\\n') % msgfn)\\n                 raise\\n             # update bookmarks, dirstate and mergestate\\n             bookmarks.update(self, [p1, p2], ret)\\n             cctx.markcommitted(ret)\\n             ms.reset()\\n             tr.close()\\n \\n         finally:\\n             lockmod.release(tr, lock, wlock)\\n \\n         def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):\\n             # hack for command that use a temporary commit (eg: histedit)\\n             # temporary commit got stripped before hook release\\n             if self.changelog.hasnode(ret):\\n                 self.hook(\\\"commit\\\", node=node, parent1=parent1,\\n                           parent2=parent2)\\n         self._afterlock(commithook)\\n         return ret\\n \\n     @unfilteredmethod\\n     def commitctx(self, ctx, error=False):\\n         \\\"\\\"\\\"Add a new revision to current repository.\\n         Revision information is passed via the context argument.\\n         \\\"\\\"\\\"\\n \\n         tr = None\\n         p1, p2 = ctx.p1(), ctx.p2()\\n         user = ctx.user()\\n \\n         lock = self.lock()\\n         try:\\n             tr = self.transaction(\\\"commit\\\")\\n             trp = weakref.proxy(tr)\\n \\n             if ctx.manifestnode():\\n                 # reuse an existing manifest revision\\n                 mn = ctx.manifestnode()\\n                 files = ctx.files()\\n             elif ctx.files():\\n                 m1ctx = p1.manifestctx()\\n                 m2ctx = p2.manifestctx()\\n                 mctx = m1ctx.copy()\\n \\n                 m = mctx.read()\\n                 m1 = m1ctx.read()\\n                 m2 = m2ctx.read()\\n \\n                 # check in files\\n                 added = []\\n                 changed = []\\n                 removed = list(ctx.removed())\\n                 linkrev = len(self)\\n                 self.ui.note(_(\\\"committing files:\\\\n\\\"))\\n                 for f in sorted(ctx.modified() + ctx.added()):\\n                     self.ui.note(f + \\\"\\\\n\\\")\\n                     try:\\n                         fctx = ctx[f]\\n                         if fctx is None:\\n                             removed.append(f)\\n                         else:\\n                             added.append(f)\\n                             m[f] = self._filecommit(fctx, m1, m2, 
linkrev,\\n                                                     trp, changed)\\n                             m.setflag(f, fctx.flags())\\n                     except OSError as inst:\\n                         self.ui.warn(_(\\\"trouble committing %s!\\\\n\\\") % f)\\n                         raise\\n                     except IOError as inst:\\n                         errcode = getattr(inst, 'errno', errno.ENOENT)\\n                         if error or errcode and errcode != errno.ENOENT:\\n                             self.ui.warn(_(\\\"trouble committing %s!\\\\n\\\") % f)\\n                         raise\\n \\n                 # update manifest\\n                 self.ui.note(_(\\\"committing manifest\\\\n\\\"))\\n                 removed = [f for f in sorted(removed) if f in m1 or f in m2]\\n                 drop = [f for f in removed if f in m]\\n                 for f in drop:\\n                     del m[f]\\n                 mn = mctx.write(trp, linkrev,\\n                                 p1.manifestnode(), p2.manifestnode(),\\n                                 added, drop)\\n                 files = changed + removed\\n             else:\\n                 mn = p1.manifestnode()\\n                 files = []\\n \\n             # update changelog\\n             self.ui.note(_(\\\"committing changelog\\\\n\\\"))\\n             self.changelog.delayupdate(tr)\\n             n = self.changelog.add(mn, files, ctx.description(),\\n                                    trp, p1.node(), p2.node(),\\n                                    user, ctx.date(), ctx.extra().copy())\\n             xp1, xp2 = p1.hex(), p2 and p2.hex() or ''\\n             self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,\\n                       parent2=xp2)\\n             # set the new commit is proper phase\\n             targetphase = subrepo.newcommitphase(self.ui, ctx)\\n             if targetphase:\\n                 # retract boundary do not alter parent changeset.\\n                 # if a parent have higher the resulting phase will\\n                 # be compliant anyway\\n                 #\\n                 # if minimal phase was 0 we don't need to retract anything\\n                 phases.registernew(self, tr, targetphase, [n])\\n             tr.close()\\n             return n\\n         finally:\\n             if tr:\\n                 tr.release()\\n             lock.release()\\n \\n     @unfilteredmethod\\n     def destroying(self):\\n         '''Inform the repository that nodes are about to be destroyed.\\n         Intended for use by strip and rollback, so there's a common\\n         place for anything that has to be done before destroying history.\\n \\n         This is mostly useful for saving state that is in memory and waiting\\n         to be flushed when the current lock is released. Because a call to\\n         destroyed is imminent, the repo will be invalidated causing those\\n         changes to stay in memory (waiting for the next unlock), or vanish\\n         completely.\\n         '''\\n         # When using the same lock to commit and strip, the phasecache is left\\n         # dirty after committing. 
Then when we strip, the repo is invalidated,\\n         # causing those changes to disappear.\\n         if '_phasecache' in vars(self):\\n             self._phasecache.write()\\n \\n     @unfilteredmethod\\n     def destroyed(self):\\n         '''Inform the repository that nodes have been destroyed.\\n         Intended for use by strip and rollback, so there's a common\\n         place for anything that has to be done after destroying history.\\n         '''\\n         # When one tries to:\\n         # 1) destroy nodes thus calling this method (e.g. strip)\\n         # 2) use phasecache somewhere (e.g. commit)\\n         #\\n         # then 2) will fail because the phasecache contains nodes that were\\n         # removed. We can either remove phasecache from the filecache,\\n         # causing it to reload next time it is accessed, or simply filter\\n         # the removed nodes now and write the updated cache.\\n         self._phasecache.filterunknown(self)\\n         self._phasecache.write()\\n \\n         # refresh all repository caches\\n         self.updatecaches()\\n \\n         # Ensure the persistent tag cache is updated.  Doing it now\\n         # means that the tag cache only has to worry about destroyed\\n         # heads immediately after a strip\\/rollback.  That in turn\\n         # guarantees that \\\"cachetip == currenttip\\\" (comparing both rev\\n         # and node) always means no nodes have been added or destroyed.\\n \\n         # XXX this is suboptimal when qrefresh'ing: we strip the current\\n         # head, refresh the tag cache, then immediately add a new head.\\n         # But I think doing it this way is necessary for the \\\"instant\\n         # tag cache retrieval\\\" case to work.\\n         self.invalidate()\\n \\n     def walk(self, match, node=None):\\n         '''\\n         walk recursively through the directory tree or a given\\n         changeset, finding all files matched by the match\\n         function\\n         '''\\n         self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')\\n         return self[node].walk(match)\\n \\n     def status(self, node1='.', node2=None, match=None,\\n                ignored=False, clean=False, unknown=False,\\n                listsubrepos=False):\\n         '''a convenience method that calls node1.status(node2)'''\\n         return self[node1].status(node2, match, ignored, clean, unknown,\\n                                   listsubrepos)\\n \\n     def addpostdsstatus(self, ps):\\n         \\\"\\\"\\\"Add a callback to run within the wlock, at the point at which status\\n         fixups happen.\\n \\n         On status completion, callback(wctx, status) will be called with the\\n         wlock held, unless the dirstate has changed from underneath or the wlock\\n         couldn't be grabbed.\\n \\n         Callbacks should not capture and use a cached copy of the dirstate --\\n         it might change in the meanwhile. 
Instead, they should access the\\n         dirstate via wctx.repo().dirstate.\\n \\n         This list is emptied out after each status run -- extensions should\\n         make sure it adds to this list each time dirstate.status is called.\\n         Extensions should also make sure they don't call this for statuses\\n         that don't involve the dirstate.\\n         \\\"\\\"\\\"\\n \\n         # The list is located here for uniqueness reasons -- it is actually\\n         # managed by the workingctx, but that isn't unique per-repo.\\n         self._postdsstatus.append(ps)\\n \\n     def postdsstatus(self):\\n         \\\"\\\"\\\"Used by workingctx to get the list of post-dirstate-status hooks.\\\"\\\"\\\"\\n         return self._postdsstatus\\n \\n     def clearpostdsstatus(self):\\n         \\\"\\\"\\\"Used by workingctx to clear post-dirstate-status hooks.\\\"\\\"\\\"\\n         del self._postdsstatus[:]\\n \\n     def heads(self, start=None):\\n         if start is None:\\n             cl = self.changelog\\n             headrevs = reversed(cl.headrevs())\\n             return [cl.node(rev) for rev in headrevs]\\n \\n         heads = self.changelog.heads(start)\\n         # sort the output in rev descending order\\n         return sorted(heads, key=self.changelog.rev, reverse=True)\\n \\n     def branchheads(self, branch=None, start=None, closed=False):\\n         '''return a (possibly filtered) list of heads for the given branch\\n \\n         Heads are returned in topological order, from newest to oldest.\\n         If branch is None, use the dirstate branch.\\n         If start is not None, return only heads reachable from start.\\n         If closed is True, return heads that are marked as closed as well.\\n         '''\\n         if branch is None:\\n             branch = self[None].branch()\\n         branches = self.branchmap()\\n         if branch not in branches:\\n             return []\\n         # the cache returns heads ordered lowest to highest\\n         bheads = list(reversed(branches.branchheads(branch, closed=closed)))\\n         if start is not None:\\n             # filter out the heads that cannot be reached from startrev\\n             fbheads = set(self.changelog.nodesbetween([start], bheads)[2])\\n             bheads = [h for h in bheads if h in fbheads]\\n         return bheads\\n \\n     def branches(self, nodes):\\n         if not nodes:\\n             nodes = [self.changelog.tip()]\\n         b = []\\n         for n in nodes:\\n             t = n\\n             while True:\\n                 p = self.changelog.parents(n)\\n                 if p[1] != nullid or p[0] == nullid:\\n                     b.append((t, n, p[0], p[1]))\\n                     break\\n                 n = p[0]\\n         return b\\n \\n     def between(self, pairs):\\n         r = []\\n \\n         for top, bottom in pairs:\\n             n, l, i = top, [], 0\\n             f = 1\\n \\n             while n != bottom and n != nullid:\\n                 p = self.changelog.parents(n)[0]\\n                 if i == f:\\n                     l.append(n)\\n                     f = f * 2\\n                 n = p\\n                 i += 1\\n \\n             r.append(l)\\n \\n         return r\\n \\n     def checkpush(self, pushop):\\n         \\\"\\\"\\\"Extensions can override this function if additional checks have\\n         to be performed before pushing, or call it if they override push\\n         command.\\n         \\\"\\\"\\\"\\n \\n     @unfilteredpropertycache\\n     def 
prepushoutgoinghooks(self):\\n         \\\"\\\"\\\"Return util.hooks consists of a pushop with repo, remote, outgoing\\n         methods, which are called before pushing changesets.\\n         \\\"\\\"\\\"\\n         return util.hooks()\\n \\n     def pushkey(self, namespace, key, old, new):\\n         try:\\n             tr = self.currenttransaction()\\n             hookargs = {}\\n             if tr is not None:\\n                 hookargs.update(tr.hookargs)\\n             hookargs['namespace'] = namespace\\n             hookargs['key'] = key\\n             hookargs['old'] = old\\n             hookargs['new'] = new\\n             self.hook('prepushkey', throw=True, **hookargs)\\n         except error.HookAbort as exc:\\n             self.ui.write_err(_(\\\"pushkey-abort: %s\\\\n\\\") % exc)\\n             if exc.hint:\\n                 self.ui.write_err(_(\\\"(%s)\\\\n\\\") % exc.hint)\\n             return False\\n         self.ui.debug('pushing key for \\\"%s:%s\\\"\\\\n' % (namespace, key))\\n         ret = pushkey.push(self, namespace, key, old, new)\\n         def runhook():\\n             self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,\\n                       ret=ret)\\n         self._afterlock(runhook)\\n         return ret\\n \\n     def listkeys(self, namespace):\\n         self.hook('prelistkeys', throw=True, namespace=namespace)\\n         self.ui.debug('listing keys for \\\"%s\\\"\\\\n' % namespace)\\n         values = pushkey.list(self, namespace)\\n         self.hook('listkeys', namespace=namespace, values=values)\\n         return values\\n \\n     def debugwireargs(self, one, two, three=None, four=None, five=None):\\n         '''used to test argument passing over the wire'''\\n         return \\\"%s %s %s %s %s\\\" % (one, two, three, four, five)\\n \\n     def savecommitmessage(self, text):\\n         fp = self.vfs('last-message.txt', 'wb')\\n         try:\\n             fp.write(text)\\n         finally:\\n             fp.close()\\n         return self.pathto(fp.name[len(self.root) + 1:])\\n \\n # used to avoid circular references so destructors work\\n def aftertrans(files):\\n     renamefiles = [tuple(t) for t in files]\\n     def a():\\n         for vfs, src, dest in renamefiles:\\n             # if src and dest refer to a same file, vfs.rename is a no-op,\\n             # leaving both src and dest on disk. 
delete dest to make sure\\n             # the rename couldn't be such a no-op.\\n             vfs.tryunlink(dest)\\n             try:\\n                 vfs.rename(src, dest)\\n             except OSError: # journal file does not yet exist\\n                 pass\\n     return a\\n \\n def undoname(fn):\\n     base, name = os.path.split(fn)\\n     assert name.startswith('journal')\\n     return os.path.join(base, name.replace('journal', 'undo', 1))\\n \\n def instance(ui, path, create):\\n     return localrepository(ui, util.urllocalpath(path), create)\\n \\n def islocal(path):\\n     return True\\n \\n def newreporequirements(repo):\\n     \\\"\\\"\\\"Determine the set of requirements for a new local repository.\\n \\n     Extensions can wrap this function to specify custom requirements for\\n     new repositories.\\n     \\\"\\\"\\\"\\n     ui = repo.ui\\n     requirements = {'revlogv1'}\\n     if ui.configbool('format', 'usestore'):\\n         requirements.add('store')\\n         if ui.configbool('format', 'usefncache'):\\n             requirements.add('fncache')\\n             if ui.configbool('format', 'dotencode'):\\n                 requirements.add('dotencode')\\n \\n     compengine = ui.config('experimental', 'format.compression')\\n     if compengine not in util.compengines:\\n         raise error.Abort(_('compression engine %s defined by '\\n                             'experimental.format.compression not available') %\\n                           compengine,\\n                           hint=_('run \\\"hg debuginstall\\\" to list available '\\n                                  'compression engines'))\\n \\n     # zlib is the historical default and doesn't need an explicit requirement.\\n     if compengine != 'zlib':\\n         requirements.add('exp-compression-%s' % compengine)\\n \\n     if scmutil.gdinitconfig(ui):\\n         requirements.add('generaldelta')\\n     if ui.configbool('experimental', 'treemanifest'):\\n         requirements.add('treemanifest')\\n     if ui.configbool('experimental', 'manifestv2'):\\n         requirements.add('manifestv2')\\n \\n     revlogv2 = ui.config('experimental', 'revlogv2')\\n     if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':\\n         requirements.remove('revlogv1')\\n         # generaldelta is implied by revlogv2.\\n         requirements.discard('generaldelta')\\n         requirements.add(REVLOGV2_REQUIREMENT)\\n \\n     return requirements\\n\"}]}],\"properties\":{\"hg:meta\":{\"date\":\"1509404054 -19800\",\"node\":\"44fc1c1f1774a76423b9c732af6938435099bcc5\",\"user\":\"Pulkit Goyal \\u003c7895pulkit@gmail.com\\u003e\",\"parent\":\"8feef8ef8389a3b544e0a74624f1efc3a8d85d35\"}}}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "146"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B4092%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
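
The request bodies recorded in these cassettes are ordinary form-encoded POSTs: the Conduit call parameters travel as one JSON blob in a single "params" field, alongside the fixed "output=json" and "__conduit__=1" fields. A minimal sketch of decoding the differential.querydiffs body above, using only the standard library (the variable names are illustrative):

    import json
    from urllib.parse import parse_qs

    # Body of the differential.querydiffs request recorded above,
    # copied verbatim from the cassette.
    body = ("params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish"
            "%22%7D%2C+%22ids%22%3A+%5B4092%5D%7D&output=json&__conduit__=1")

    params = json.loads(parse_qs(body)["params"][0])
    assert params["ids"] == [4092]                 # the diff being fetched
    assert params["__conduit__"]["token"].startswith("cli-")
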
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:21 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":\"diff --git a\\/mercurial\\/repoview.py b\\/mercurial\\/repoview.py\\n--- a\\/mercurial\\/repoview.py\\n+++ b\\/mercurial\\/repoview.py\\n@@ -185,6 +185,9 @@\\n     subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.\\n     \\\"\\\"\\\"\\n \\n+    # hidden revs which should be visible\\n+    _visibilityexceptions = set()\\n+\\n     def __init__(self, repo, filtername):\\n         object.__setattr__(self, r'_unfilteredrepo', repo)\\n         object.__setattr__(self, r'filtername', filtername)\\n@@ -231,6 +234,14 @@\\n             return self\\n         return self.unfiltered().filtered(name)\\n \\n+    def addvisibilityexceptions(self, revs):\\n+        \\\"\\\"\\\"adds hidden revs which should be visible to set of exceptions\\\"\\\"\\\"\\n+        self._visibilityexceptions.update(revs)\\n+\\n+    def getvisibilityexceptions(self):\\n+        \\\"\\\"\\\"returns the set of hidden revs which should be visible\\\"\\\"\\\"\\n+        return self._visibilityexceptions\\n+\\n     # everything access are forwarded to the proxied repo\\n     def __getattr__(self, attr):\\n         return getattr(self._unfilteredrepo, attr)\\ndiff --git a\\/mercurial\\/localrepo.py b\\/mercurial\\/localrepo.py\\n--- a\\/mercurial\\/localrepo.py\\n+++ b\\/mercurial\\/localrepo.py\\n@@ -570,6 +570,14 @@\\n     def close(self):\\n         self._writecaches()\\n \\n+    def addvisibilityexceptions(self, exceptions):\\n+        # should be called on a filtered repository\\n+        pass\\n+\\n+    def getvisibilityexceptions(self):\\n+        # should be called on a filtered repository\\n+        return set()\\n+\\n     def _loadextensions(self):\\n         extensions.loadall(self.ui)\\n \\n\\n\",\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.getrawdiff", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "143"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22diffID%22%3A+4092%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
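
Each cassette is one JSON document: an "interactions" list of request/response pairs plus a schema "version". During a test run the HTTP layer answers from the recording instead of contacting phab.mercurial-scm.org. A rough sketch of how such a recording can be replayed, assuming exact matching on method, URI and body (the real fixture machinery may match on different fields or replay in recorded order):

    import json

    def load_cassette(path):
        # A cassette, like the file ending above, is a JSON document with
        # an "interactions" list of request/response pairs and a "version".
        with open(path) as f:
            return json.load(f)

    def replay(cassette, method, uri, body):
        # Hypothetical matcher: return the recorded response for the first
        # interaction whose request matches exactly.
        for interaction in cassette["interactions"]:
            req = interaction["request"]
            if (req["method"], req["uri"], req["body"]) == (method, uri, body):
                return interaction["response"]
        raise LookupError("no recorded interaction matches this request")
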
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-comment-created.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,617 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:55 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"refRules\":{\"fetchRules\":[],\"trackRules\":[],\"permanentRefRules\":[]},\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "183"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22constraints%22%3A+%7B%22callsigns%22%3A+%5B%22HG%22%5D%7D%7D&output=json&__conduit__=1"
+            }
+        }, 
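
The first interaction of this cassette resolves the repository PHID from the configured callsign via diffusion.repository.search; the returned PHID-REPO-bvunnehri4u2isyr7bc3 is then referenced by the differential.creatediff call below. A sketch of how such a body is built (conduit_body is an illustrative helper, not the extension's API):

    import json
    from urllib.parse import urlencode

    def conduit_body(params):
        # Conduit packs the call parameters into a single JSON-valued
        # 'params' form field; 'output=json' and '__conduit__=1' are fixed.
        return urlencode({"params": json.dumps(params),
                          "output": "json",
                          "__conduit__": "1"})

    # Repository lookup by callsign, mirroring the request recorded above
    # (the token is the scrubbed placeholder used throughout these tests).
    body = conduit_body({"__conduit__": {"token": "cli-hahayouwish"},
                         "constraints": {"callsigns": ["HG"]}})

With insertion-ordered dicts this should reproduce the recorded body byte for byte.
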
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:56 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"diffid\":19395,\"phid\":\"PHID-DIFF-bfatspvpv25tfkj2zeqw\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/19395\\/\"},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "1178"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22comment%22%3A+%7B%22addLines%22%3A+1%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22comment%22%2C+%22delLines%22%3A+0%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+1%2C+%22corpus%22%3A+%22%2Bcomment%5Cn%22%2C+%22delLines%22%3A+0%2C+%22newLength%22%3A+1%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+0%2C+%22oldOffset%22%3A+0%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%22unix%3Afilemode%22%3A+%22100644%22%7D%2C+%22oldPath%22%3A+null%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+1%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%223244dc4a33342b4d91ad534ae091685244ac5ed4%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1"
+            }
+        }, 
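
differential.creatediff uploads the patch as structured per-file changes rather than raw diff text; the response supplies the diff id (19395) and diff PHID that every later call in this cassette refers to. Decoding the urlencoded body above yields, abbreviated to the fields this test exercises (the inline notes are interpretive, not part of the recording):

    # Decoded 'params' of the differential.creatediff call above.
    creatediff_params = {
        "branch": "default",
        "changes": {
            "comment": {                       # one new file named 'comment'
                "currentPath": "comment",
                "addLines": 1,
                "delLines": 0,
                "hunks": [{"corpus": "+comment\n",   # the entire patch body
                           "oldOffset": 0, "oldLength": 0,
                           "newOffset": 1, "newLength": 1}],
                "newProperties": {"unix:filemode": "100644"},
                "type": 1,                     # change type: file added
            },
        },
        "creationMethod": "phabsend",
        "repositoryPHID": "PHID-REPO-bvunnehri4u2isyr7bc3",
        "sourceControlBaseRevision": "3244dc4a33342b4d91ad534ae091685244ac5ed4",
        "sourceControlSystem": "hg",
    }
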
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:57 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22d5dddca9023d1025bf135b946a4dfe38901ab46d%5C%22%2C+%5C%22parent%5C%22%3A+%5C%223244dc4a33342b4d91ad534ae091685244ac5ed4%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19395%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
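
Once the diff exists, differential.setdiffproperty attaches Mercurial-specific metadata under the hg:meta name so that a later phabread can reconstruct the commit. Note the double encoding: the property payload is a JSON string inside the JSON 'params' blob. Decoded from the call above:

    import json

    hg_meta = {
        "branch": "default",
        "date": "0 0",        # the fixed clock used by the test suite
        "node": "d5dddca9023d1025bf135b946a4dfe38901ab46d",
        "parent": "3244dc4a33342b4d91ad534ae091685244ac5ed4",
        "user": "test",
    }
    setdiffproperty_params = {"diff_id": 19395, "name": "hg:meta",
                              "data": json.dumps(hg_meta)}
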
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:57 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22d5dddca9023d1025bf135b946a4dfe38901ab46d%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22d5dddca9023d1025bf135b946a4dfe38901ab46d%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%223244dc4a33342b4d91ad534ae091685244ac5ed4%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19395%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }, 
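
A second differential.setdiffproperty call stores the companion local:commits property, a mapping keyed by the full commit hash. Decoded from the recorded body above:

    local_commits = {
        "d5dddca9023d1025bf135b946a4dfe38901ab46d": {
            "author": "test",
            "authorEmail": "test",
            "branch": "default",
            "commit": "d5dddca9023d1025bf135b946a4dfe38901ab46d",
            "parents": ["3244dc4a33342b4d91ad534ae091685244ac5ed4"],
            "time": 0,
        },
    }
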
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:58 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create comment for phabricator test\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create comment for phabricator test\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "180"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22create+comment+for+phabricator+test%22%7D&output=json&__conduit__=1"
+            }
+        }, 
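
differential.parsecommitmessage lets the server split the commit message into revision fields. Here the one-line message yields a single title transaction, and revisionIDFieldInfo.value is null because the message carries no Differential Revision URL yet, which is consistent with this cassette creating a new revision rather than updating one. In outline:

    # Shape of the parsecommitmessage result above (abbreviated).
    parsed = {
        "fields": {"title": "create comment for phabricator test"},
        "revisionIDFieldInfo": {"value": None},
        "transactions": [
            {"type": "title",
             "value": "create comment for phabricator test"},
        ],
    }
    creates_new_revision = parsed["revisionIDFieldInfo"]["value"] is None
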
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:59 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":7919,\"phid\":\"PHID-DREV-mrxkguxqg3qmf6o3ah4d\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-leebtwz4loeiuwx\"},{\"phid\":\"PHID-XACT-DREV-lpxwfvxsdm6y5lq\"},{\"phid\":\"PHID-XACT-DREV-fb3qpskghwcxurl\"},{\"phid\":\"PHID-XACT-DREV-bmekz2k4c3bmsog\"},{\"phid\":\"PHID-XACT-DREV-tydrnhs4cnyi2ng\"},{\"phid\":\"PHID-XACT-DREV-lbctcnaslgwcjvu\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "413"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-bfatspvpv25tfkj2zeqw%22%7D%2C+%7B%22type%22%3A+%22comment%22%2C+%22value%22%3A+%22For+default+branch%22%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22create+comment+for+phabricator+test%22%7D%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
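
differential.revision.edit then creates the revision in one shot, bundling three transactions: attach the uploaded diff, post the user-supplied comment (the behaviour this phabsend-comment-created cassette exists to cover), and set the title. Decoded from the request above; the response assigns the new revision id 7919, i.e. D7919:

    transactions = [
        {"type": "update", "value": "PHID-DIFF-bfatspvpv25tfkj2zeqw"},
        {"type": "comment", "value": "For default branch"},
        {"type": "title", "value": "create comment for phabricator test"},
    ]
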
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:00 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"7919\",\"phid\":\"PHID-DREV-mrxkguxqg3qmf6o3ah4d\",\"title\":\"create comment for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D7919\",\"dateCreated\":\"1579221179\",\"dateModified\":\"1579221179\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":0},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"1\",\"activeDiffPHID\":\"PHID-DIFF-bfatspvpv25tfkj2zeqw\",\"diffs\":[\"19395\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "146"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B7919%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:00 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22f7db812bbe1db49d86823e6d7b9ab4b30539f801%5C%22%2C+%5C%22parent%5C%22%3A+%5C%223244dc4a33342b4d91ad534ae091685244ac5ed4%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19395%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:01 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22f7db812bbe1db49d86823e6d7b9ab4b30539f801%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22f7db812bbe1db49d86823e6d7b9ab4b30539f801%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%223244dc4a33342b4d91ad534ae091685244ac5ed4%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19395%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-comment-updated.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,549 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:10 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"19395\":{\"id\":\"19395\",\"revisionID\":\"7919\",\"dateCreated\":\"1579221176\",\"dateModified\":\"1579221179\",\"sourceControlBaseRevision\":\"3244dc4a33342b4d91ad534ae091685244ac5ed4\",\"sourceControlPath\":\"\\/\",\"sourceControlSystem\":\"hg\",\"branch\":\"default\",\"bookmark\":null,\"creationMethod\":\"phabsend\",\"description\":null,\"unitStatus\":\"0\",\"lintStatus\":\"0\",\"changes\":[{\"id\":\"52928\",\"metadata\":{\"line:first\":1,\"hash.effect\":\"mzg_LBhhVYqb\"},\"oldPath\":null,\"currentPath\":\"comment\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"1\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"1\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+comment\\n\"}]}],\"properties\":{\"hg:meta\":{\"branch\":\"default\",\"date\":\"0 0\",\"node\":\"f7db812bbe1db49d86823e6d7b9ab4b30539f801\",\"parent\":\"3244dc4a33342b4d91ad534ae091685244ac5ed4\",\"user\":\"test\"},\"local:commits\":{\"f7db812bbe1db49d86823e6d7b9ab4b30539f801\":{\"author\":\"test\",\"authorEmail\":\"test\",\"branch\":\"default\",\"commit\":\"f7db812bbe1db49d86823e6d7b9ab4b30539f801\",\"parents\":[\"3244dc4a33342b4d91ad534ae091685244ac5ed4\"],\"time\":0}}},\"authorName\":\"test\",\"authorEmail\":\"test\"}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "154"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22revisionIDs%22%3A+%5B7919%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:11 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"refRules\":{\"fetchRules\":[],\"trackRules\":[],\"permanentRefRules\":[]},\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "183"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22constraints%22%3A+%7B%22callsigns%22%3A+%5B%22HG%22%5D%7D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:11 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"diffid\":19396,\"phid\":\"PHID-DIFF-peqlcs25nvzqrns6izrf\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/19396\\/\"},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "1193"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22comment%22%3A+%7B%22addLines%22%3A+2%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22comment%22%2C+%22delLines%22%3A+0%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+2%2C+%22corpus%22%3A+%22%2Bcomment%5Cn%2Bcomment2%5Cn%22%2C+%22delLines%22%3A+0%2C+%22newLength%22%3A+2%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+0%2C+%22oldOffset%22%3A+0%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%22unix%3Afilemode%22%3A+%22100644%22%7D%2C+%22oldPath%22%3A+null%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+1%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%223244dc4a33342b4d91ad534ae091685244ac5ed4%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:12 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%221849d7828727a28e14c589323e4f8c9a1c8d2816%5C%22%2C+%5C%22parent%5C%22%3A+%5C%223244dc4a33342b4d91ad534ae091685244ac5ed4%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19396%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:13 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%221849d7828727a28e14c589323e4f8c9a1c8d2816%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%221849d7828727a28e14c589323e4f8c9a1c8d2816%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%223244dc4a33342b4d91ad534ae091685244ac5ed4%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19396%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:13 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create comment for phabricator test\",\"revisionID\":7919},\"revisionIDFieldInfo\":{\"value\":7919,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create comment for phabricator test\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "257"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22create+comment+for+phabricator+test%5Cn%5CnDifferential+Revision%3A+https%3A%2F%2Fphab.mercurial-scm.org%2FD7919%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:14 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":7919,\"phid\":\"PHID-DREV-mrxkguxqg3qmf6o3ah4d\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-fstsdazolwve3l5\"},{\"phid\":\"PHID-XACT-DREV-ctvtbwhgeyr4oau\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "452"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22objectIdentifier%22%3A+7919%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-peqlcs25nvzqrns6izrf%22%7D%2C+%7B%22type%22%3A+%22comment%22%2C+%22value%22%3A+%22Address+review+comments%22%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22create+comment+for+phabricator+test%22%7D%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:15 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"7919\",\"phid\":\"PHID-DREV-mrxkguxqg3qmf6o3ah4d\",\"title\":\"create comment for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D7919\",\"dateCreated\":\"1579221179\",\"dateModified\":\"1579221194\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":2,\"lines.removed\":0},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-peqlcs25nvzqrns6izrf\",\"diffs\":[\"19396\",\"19395\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "146"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B7919%5D%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-create-alpha.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,617 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:00 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"refRules\":{\"fetchRules\":[],\"trackRules\":[],\"permanentRefRules\":[]},\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "183"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22constraints%22%3A+%7B%22callsigns%22%3A+%5B%22HG%22%5D%7D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:01 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"diffid\":19390,\"phid\":\"PHID-DIFF-36wohqs4e4l6spcrzg65\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/19390\\/\"},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "1172"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22alpha%22%3A+%7B%22addLines%22%3A+1%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22alpha%22%2C+%22delLines%22%3A+0%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+1%2C+%22corpus%22%3A+%22%2Balpha%5Cn%22%2C+%22delLines%22%3A+0%2C+%22newLength%22%3A+1%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+0%2C+%22oldOffset%22%3A+0%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%22unix%3Afilemode%22%3A+%22100644%22%7D%2C+%22oldPath%22%3A+null%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+1%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%220000000000000000000000000000000000000000%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:01 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22d386117f30e6b1282897bdbde75ac21e095163d4%5C%22%2C+%5C%22parent%5C%22%3A+%5C%220000000000000000000000000000000000000000%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19390%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:02 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22d386117f30e6b1282897bdbde75ac21e095163d4%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22d386117f30e6b1282897bdbde75ac21e095163d4%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%220000000000000000000000000000000000000000%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19390%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:03 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create alpha for phabricator test \\u20ac\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create alpha for phabricator test \\u20ac\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "187"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22create+alpha+for+phabricator+test+%5Cu20ac%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:03 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":7915,\"phid\":\"PHID-DREV-3mzbavd2ajsbar5l3esr\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-55yg7c5j7d6wjfv\"},{\"phid\":\"PHID-XACT-DREV-wizrkanyez7gzuu\"},{\"phid\":\"PHID-XACT-DREV-aobh3arrwbdwoh7\"},{\"phid\":\"PHID-XACT-DREV-zwea5upt4cn23ve\"},{\"phid\":\"PHID-XACT-DREV-dklz3iyhmwcoi5p\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "340"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-36wohqs4e4l6spcrzg65%22%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22create+alpha+for+phabricator+test+%5Cu20ac%22%7D%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:04 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"7915\",\"phid\":\"PHID-DREV-3mzbavd2ajsbar5l3esr\",\"title\":\"create alpha for phabricator test \\u20ac\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D7915\",\"dateCreated\":\"1579221124\",\"dateModified\":\"1579221124\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":0},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"1\",\"activeDiffPHID\":\"PHID-DIFF-36wohqs4e4l6spcrzg65\",\"diffs\":[\"19390\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "146"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B7915%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:05 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22347bf67801e55faaffe1616c6bac53cdf6b6dfc2%5C%22%2C+%5C%22parent%5C%22%3A+%5C%220000000000000000000000000000000000000000%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19390%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:06 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22347bf67801e55faaffe1616c6bac53cdf6b6dfc2%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22347bf67801e55faaffe1616c6bac53cdf6b6dfc2%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%220000000000000000000000000000000000000000%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19390%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
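
The fixtures in this changeset follow vcrpy's JSON cassette format (version 1, an "interactions" list pairing each recorded Conduit response with the request that produced it); tests/test-phabricator.t replays them through the phabricator extension's --test-vcr option rather than contacting phab.mercurial-scm.org. Each request "body" above is an application/x-www-form-urlencoded form whose "params" field is itself JSON. A minimal sketch for decoding one cassette offline, assuming only the Python standard library (the helper name is illustrative, not part of the test suite):

  import json
  from urllib.parse import parse_qs

  def decode_conduit_body(body):
      # Recorded bodies look like:
      #   params=%7B...%7D&output=json&__conduit__=1
      # parse_qs percent-decodes the form; the "params" value is JSON.
      return json.loads(parse_qs(body)["params"][0])

  with open("tests/phabricator/phabsend-create-public.json") as f:
      cassette = json.load(f)

  for interaction in cassette["interactions"]:
      request = interaction["request"]
      print(request["method"], request["uri"])
      print(json.dumps(decode_conduit_body(request["body"]), indent=2))

This makes the Conduit API traffic (token, transactions, diff properties) readable without hand-unescaping the percent-encoded blobs.
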
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-create-public.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,957 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:37 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"refRules\":{\"fetchRules\":[],\"trackRules\":[],\"permanentRefRules\":[]},\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "183"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22constraints%22%3A+%7B%22callsigns%22%3A+%5B%22HG%22%5D%7D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:37 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"diffid\":19393,\"phid\":\"PHID-DIFF-e64weyerxtutv2jvj2dt\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/19393\\/\"},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "1156"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22beta%22%3A+%7B%22addLines%22%3A+1%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22beta%22%2C+%22delLines%22%3A+1%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+1%2C+%22corpus%22%3A+%22-beta%5Cn%2Bpublic+change%5Cn%22%2C+%22delLines%22%3A+1%2C+%22newLength%22%3A+1%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+1%2C+%22oldOffset%22%3A+1%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%7D%2C+%22oldPath%22%3A+%22beta%22%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+2%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%22a692622e693757674f85ff481c7ff77057a7f82a%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:38 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%227b4185ab5d16acf98e41d566be38c5dbea10878d%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22a692622e693757674f85ff481c7ff77057a7f82a%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19393%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:39 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%227b4185ab5d16acf98e41d566be38c5dbea10878d%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%227b4185ab5d16acf98e41d566be38c5dbea10878d%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22a692622e693757674f85ff481c7ff77057a7f82a%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19393%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:39 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create public change for phabricator testing\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create public change for phabricator testing\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "189"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22create+public+change+for+phabricator+testing%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:40 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":7917,\"phid\":\"PHID-DREV-yhl3yvijs4jploa5iqm4\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-kljgpfhorijhw6v\"},{\"phid\":\"PHID-XACT-DREV-muujhcz6hb4n77e\"},{\"phid\":\"PHID-XACT-DREV-a74sh3ztjjnn2rh\"},{\"phid\":\"PHID-XACT-DREV-eboclo4ac3olsht\"},{\"phid\":\"PHID-XACT-DREV-d6mrwth26sbchrm\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "342"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-e64weyerxtutv2jvj2dt%22%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22create+public+change+for+phabricator+testing%22%7D%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:41 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"diffid\":19394,\"phid\":\"PHID-DIFF-pqdlhei24n47fzeofjph\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/19394\\/\"},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "1168"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22alpha%22%3A+%7B%22addLines%22%3A+1%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22alpha%22%2C+%22delLines%22%3A+2%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+1%2C+%22corpus%22%3A+%22-alpha%5Cn-more%5Cn%2Bdraft+change%5Cn%22%2C+%22delLines%22%3A+2%2C+%22newLength%22%3A+1%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+2%2C+%22oldOffset%22%3A+1%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%7D%2C+%22oldPath%22%3A+%22alpha%22%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+2%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%227b4185ab5d16acf98e41d566be38c5dbea10878d%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:42 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22251c1c333fc660c18e841b9bebeabc37c3e56455%5C%22%2C+%5C%22parent%5C%22%3A+%5C%227b4185ab5d16acf98e41d566be38c5dbea10878d%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19394%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:42 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22251c1c333fc660c18e841b9bebeabc37c3e56455%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22251c1c333fc660c18e841b9bebeabc37c3e56455%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%227b4185ab5d16acf98e41d566be38c5dbea10878d%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19394%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:43 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create draft change for phabricator testing\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create draft change for phabricator testing\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "188"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22create+draft+change+for+phabricator+testing%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:44 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":7918,\"phid\":\"PHID-DREV-sfsckrwrwc77rdl3k5rz\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-rpqqvsy7pcpvau6\"},{\"phid\":\"PHID-XACT-DREV-mjyhm2rmcu57gow\"},{\"phid\":\"PHID-XACT-DREV-qhnb42g3u3yex2y\"},{\"phid\":\"PHID-XACT-DREV-4a7yil3a62stly2\"},{\"phid\":\"PHID-XACT-DREV-ipielqp34itcncl\"},{\"phid\":\"PHID-XACT-DREV-cmv7ezhpsxlbj3a\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "443"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-pqdlhei24n47fzeofjph%22%7D%2C+%7B%22type%22%3A+%22parents.set%22%2C+%22value%22%3A+%5B%22PHID-DREV-yhl3yvijs4jploa5iqm4%22%5D%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22create+draft+change+for+phabricator+testing%22%7D%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:45 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"7918\",\"phid\":\"PHID-DREV-sfsckrwrwc77rdl3k5rz\",\"title\":\"create draft change for phabricator testing\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D7918\",\"dateCreated\":\"1579221164\",\"dateModified\":\"1579221164\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":2},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"3\",\"activeDiffPHID\":\"PHID-DIFF-pqdlhei24n47fzeofjph\",\"diffs\":[\"19394\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-yhl3yvijs4jploa5iqm4\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"},{\"id\":\"7917\",\"phid\":\"PHID-DREV-yhl3yvijs4jploa5iqm4\",\"title\":\"create public change for phabricator testing\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D7917\",\"dateCreated\":\"1579221160\",\"dateModified\":\"1579221164\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":1},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-e64weyerxtutv2jvj2dt\",\"diffs\":[\"19393\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "154"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B7917%2C+7918%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:45 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%223244dc4a33342b4d91ad534ae091685244ac5ed4%5C%22%2C+%5C%22parent%5C%22%3A+%5C%227b4185ab5d16acf98e41d566be38c5dbea10878d%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19394%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:46 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%223244dc4a33342b4d91ad534ae091685244ac5ed4%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%223244dc4a33342b4d91ad534ae091685244ac5ed4%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%227b4185ab5d16acf98e41d566be38c5dbea10878d%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19394%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
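
A cassette can also be replayed outside the .t tests with vcrpy itself. A minimal sketch, assuming vcrpy is installed; record_mode "none" forbids network traffic, and vcrpy's default matchers compare method and URI but ignore the POST body, so the empty params payload below is a placeholder, not the recorded body:

  import urllib.request
  import vcr

  # Playback only: never record; any request absent from the cassette fails.
  replay = vcr.VCR(serializer="json", record_mode="none")

  with replay.use_cassette("tests/phabricator/phabsend-skipped.json"):
      request = urllib.request.Request(
          "https://phab.mercurial-scm.org//api/differential.querydiffs",
          data=b"params=%7B%7D&output=json&__conduit__=1",
      )
      # Answered from the recorded differential.querydiffs interaction,
      # not by the live server.
      print(urllib.request.urlopen(request).read()[:100])

Note the double slash in the URI is preserved deliberately: it is how the recorded requests were issued, so a normalized single-slash path would not match the cassette.
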
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-skipped.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,141 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:17 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"19396\":{\"id\":\"19396\",\"revisionID\":\"7919\",\"dateCreated\":\"1579221192\",\"dateModified\":\"1579221194\",\"sourceControlBaseRevision\":\"3244dc4a33342b4d91ad534ae091685244ac5ed4\",\"sourceControlPath\":\"\\/\",\"sourceControlSystem\":\"hg\",\"branch\":\"default\",\"bookmark\":null,\"creationMethod\":\"phabsend\",\"description\":null,\"unitStatus\":\"0\",\"lintStatus\":\"0\",\"changes\":[{\"id\":\"52929\",\"metadata\":{\"line:first\":1,\"hash.effect\":\"sOtQ9WtAYaL5\"},\"oldPath\":null,\"currentPath\":\"comment\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"2\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"2\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+comment\\n+comment2\\n\"}]}],\"properties\":{\"hg:meta\":{\"branch\":\"default\",\"date\":\"0 0\",\"node\":\"1849d7828727a28e14c589323e4f8c9a1c8d2816\",\"parent\":\"3244dc4a33342b4d91ad534ae091685244ac5ed4\",\"user\":\"test\"},\"local:commits\":{\"1849d7828727a28e14c589323e4f8c9a1c8d2816\":{\"author\":\"test\",\"authorEmail\":\"test\",\"branch\":\"default\",\"commit\":\"1849d7828727a28e14c589323e4f8c9a1c8d2816\",\"parents\":[\"3244dc4a33342b4d91ad534ae091685244ac5ed4\"],\"time\":0}}},\"authorName\":\"test\",\"authorEmail\":\"test\"},\"19395\":{\"id\":\"19395\",\"revisionID\":\"7919\",\"dateCreated\":\"1579221176\",\"dateModified\":\"1579221179\",\"sourceControlBaseRevision\":\"3244dc4a33342b4d91ad534ae091685244ac5ed4\",\"sourceControlPath\":\"\\/\",\"sourceControlSystem\":\"hg\",\"branch\":\"default\",\"bookmark\":null,\"creationMethod\":\"phabsend\",\"description\":null,\"unitStatus\":\"0\",\"lintStatus\":\"0\",\"changes\":[{\"id\":\"52928\",\"metadata\":{\"line:first\":1,\"hash.effect\":\"mzg_LBhhVYqb\"},\"oldPath\":null,\"currentPath\":\"comment\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"1\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"1\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+comment\\n\"}]}],\"properties\":{\"hg:meta\":{\"branch\":\"default\",\"date\":\"0 0\",\"node\":\"f7db812bbe1db49d86823e6d7b9ab4b30539f801\",\"parent\":\"3244dc4a33342b4d91ad534ae091685244ac5ed4\",\"user\":\"test\"},\"local:commits\":{\"f7db812bbe1db49d86823e6d7b9ab4b30539f801\":{\"author\":\"test\",\"authorEmail\":\"test\",\"branch\":\"default\",\"commit\":\"f7db812bbe1db49d86823e6d7b9ab4b30539f801\",\"parents\":[\"3244dc4a33342b4d91ad534ae091685244ac5ed4\"],\"time\":0}}},\"authorName\":\"test\",\"authorEmail\":\"test\"}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "154"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22revisionIDs%22%3A+%5B7919%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:33:18 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"7919\",\"phid\":\"PHID-DREV-mrxkguxqg3qmf6o3ah4d\",\"title\":\"create comment for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D7919\",\"dateCreated\":\"1579221179\",\"dateModified\":\"1579221194\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":2,\"lines.removed\":0},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-peqlcs25nvzqrns6izrf\",\"diffs\":[\"19396\",\"19395\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "146"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B7919%5D%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
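The files above and below are VCR-style HTTP cassettes replayed by the Phabricator extension tests: each interaction pairs a recorded Conduit API POST with its canned response, so the tests run without network access. As a minimal sketch (standard library only, not part of the patch), the form-encoded request body of an interaction decodes back to the JSON payload the client sent:

    import json
    from urllib.parse import parse_qs

    body = (
        'params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish'
        '%22%7D%2C+%22revisionIDs%22%3A+%5B7919%5D%7D&output=json&__conduit__=1'
    )
    params = json.loads(parse_qs(body)['params'][0])
    print(params['revisionIDs'])  # [7919]

The cassette that follows records a full phabsend round for D7915/D7916: differential.querydiffs, diffusion.repository.search, differential.creatediff, setdiffproperty for hg:meta and local:commits, differential.parsecommitmessage, differential.revision.edit, a differential.query, and a final pair of setdiffproperty calls.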
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-update-alpha-create-beta.json	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,1028 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:18 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"19390\":{\"id\":\"19390\",\"revisionID\":\"7915\",\"dateCreated\":\"1579221121\",\"dateModified\":\"1579221124\",\"sourceControlBaseRevision\":\"0000000000000000000000000000000000000000\",\"sourceControlPath\":\"\\/\",\"sourceControlSystem\":\"hg\",\"branch\":\"default\",\"bookmark\":null,\"creationMethod\":\"phabsend\",\"description\":null,\"unitStatus\":\"0\",\"lintStatus\":\"0\",\"changes\":[{\"id\":\"52923\",\"metadata\":{\"line:first\":1,\"hash.effect\":\"g6dr_XSxA9EP\"},\"oldPath\":null,\"currentPath\":\"alpha\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"1\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"1\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+alpha\\n\"}]}],\"properties\":{\"hg:meta\":{\"branch\":\"default\",\"date\":\"0 0\",\"node\":\"347bf67801e55faaffe1616c6bac53cdf6b6dfc2\",\"parent\":\"0000000000000000000000000000000000000000\",\"user\":\"test\"},\"local:commits\":{\"347bf67801e55faaffe1616c6bac53cdf6b6dfc2\":{\"author\":\"test\",\"authorEmail\":\"test\",\"branch\":\"default\",\"commit\":\"347bf67801e55faaffe1616c6bac53cdf6b6dfc2\",\"parents\":[\"0000000000000000000000000000000000000000\"],\"time\":0}}},\"authorName\":\"test\",\"authorEmail\":\"test\"}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "154"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22revisionIDs%22%3A+%5B7915%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:18 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"almanacServicePHID\":null,\"refRules\":{\"fetchRules\":[],\"trackRules\":[],\"permanentRefRules\":[]},\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "183"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22constraints%22%3A+%7B%22callsigns%22%3A+%5B%22HG%22%5D%7D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:19 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"diffid\":19391,\"phid\":\"PHID-DIFF-fu7z4h6aahgcq2h2q33b\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/19391\\/\"},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "1183"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22alpha%22%3A+%7B%22addLines%22%3A+2%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22alpha%22%2C+%22delLines%22%3A+0%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+2%2C+%22corpus%22%3A+%22%2Balpha%5Cn%2Bmore%5Cn%22%2C+%22delLines%22%3A+0%2C+%22newLength%22%3A+2%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+0%2C+%22oldOffset%22%3A+0%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%22unix%3Afilemode%22%3A+%22100644%22%7D%2C+%22oldPath%22%3A+null%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+1%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%220000000000000000000000000000000000000000%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:20 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22c44b38f24a454b288ac6977d9d75f71df027994b%5C%22%2C+%5C%22parent%5C%22%3A+%5C%220000000000000000000000000000000000000000%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19391%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:20 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22c44b38f24a454b288ac6977d9d75f71df027994b%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22c44b38f24a454b288ac6977d9d75f71df027994b%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%220000000000000000000000000000000000000000%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19391%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:21 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create alpha for phabricator test \\u20ac\",\"revisionID\":7915},\"revisionIDFieldInfo\":{\"value\":7915,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create alpha for phabricator test \\u20ac\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "264"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22create+alpha+for+phabricator+test+%5Cu20ac%5Cn%5CnDifferential+Revision%3A+https%3A%2F%2Fphab.mercurial-scm.org%2FD7915%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:22 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":7915,\"phid\":\"PHID-DREV-3mzbavd2ajsbar5l3esr\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-46hca4nfzc2t7qf\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "374"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22objectIdentifier%22%3A+7915%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-fu7z4h6aahgcq2h2q33b%22%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22create+alpha+for+phabricator+test+%5Cu20ac%22%7D%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:23 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"diffid\":19392,\"phid\":\"PHID-DIFF-vn5llgg5oh2rkzquipx4\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/19392\\/\"},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.creatediff", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "1169"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22bookmark%22%3A+null%2C+%22branch%22%3A+%22default%22%2C+%22changes%22%3A+%7B%22beta%22%3A+%7B%22addLines%22%3A+1%2C+%22awayPaths%22%3A+%5B%5D%2C+%22commitHash%22%3A+null%2C+%22currentPath%22%3A+%22beta%22%2C+%22delLines%22%3A+0%2C+%22fileType%22%3A+1%2C+%22hunks%22%3A+%5B%7B%22addLines%22%3A+1%2C+%22corpus%22%3A+%22%2Bbeta%5Cn%22%2C+%22delLines%22%3A+0%2C+%22newLength%22%3A+1%2C+%22newOffset%22%3A+1%2C+%22oldLength%22%3A+0%2C+%22oldOffset%22%3A+0%7D%5D%2C+%22metadata%22%3A+%7B%7D%2C+%22newProperties%22%3A+%7B%22unix%3Afilemode%22%3A+%22100644%22%7D%2C+%22oldPath%22%3A+null%2C+%22oldProperties%22%3A+%7B%7D%2C+%22type%22%3A+1%7D%7D%2C+%22creationMethod%22%3A+%22phabsend%22%2C+%22lintStatus%22%3A+%22none%22%2C+%22repositoryPHID%22%3A+%22PHID-REPO-bvunnehri4u2isyr7bc3%22%2C+%22sourceControlBaseRevision%22%3A+%22c44b38f24a454b288ac6977d9d75f71df027994b%22%2C+%22sourceControlPath%22%3A+%22%2F%22%2C+%22sourceControlSystem%22%3A+%22hg%22%2C+%22sourceMachine%22%3A+%22%22%2C+%22sourcePath%22%3A+%22%2F%22%2C+%22unitStatus%22%3A+%22none%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:23 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%229e6901f21d5bfd37793dc2cef58a33a3441c2f3b%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22c44b38f24a454b288ac6977d9d75f71df027994b%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19392%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:24 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%229e6901f21d5bfd37793dc2cef58a33a3441c2f3b%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%229e6901f21d5bfd37793dc2cef58a33a3441c2f3b%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22c44b38f24a454b288ac6977d9d75f71df027994b%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19392%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:25 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create beta for phabricator test\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"},\"transactions\":[{\"type\":\"title\",\"value\":\"create beta for phabricator test\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "177"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22corpus%22%3A+%22create+beta+for+phabricator+test%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "connection": [
+                        "close"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:25 GMT"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":7916,\"phid\":\"PHID-DREV-nk73cg2l2oqfozxnw2i3\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-qyy3foasgvhwuwc\"},{\"phid\":\"PHID-XACT-DREV-ke7lc4r5bg2um6j\"},{\"phid\":\"PHID-XACT-DREV-tayq76rfuwaxlo5\"},{\"phid\":\"PHID-XACT-DREV-6lnwmq75f43mu4l\"},{\"phid\":\"PHID-XACT-DREV-leh7oir7ayewz2p\"},{\"phid\":\"PHID-XACT-DREV-adrkqplhnjym4ln\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "432"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22update%22%2C+%22value%22%3A+%22PHID-DIFF-vn5llgg5oh2rkzquipx4%22%7D%2C+%7B%22type%22%3A+%22parents.set%22%2C+%22value%22%3A+%5B%22PHID-DREV-3mzbavd2ajsbar5l3esr%22%5D%7D%2C+%7B%22type%22%3A+%22title%22%2C+%22value%22%3A+%22create+beta+for+phabricator+test%22%7D%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:26 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"7916\",\"phid\":\"PHID-DREV-nk73cg2l2oqfozxnw2i3\",\"title\":\"create beta for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D7916\",\"dateCreated\":\"1579221145\",\"dateModified\":\"1579221145\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":1,\"lines.removed\":0},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"1\",\"activeDiffPHID\":\"PHID-DIFF-vn5llgg5oh2rkzquipx4\",\"diffs\":[\"19392\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-3mzbavd2ajsbar5l3esr\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"},{\"id\":\"7915\",\"phid\":\"PHID-DREV-3mzbavd2ajsbar5l3esr\",\"title\":\"create alpha for phabricator test \\u20ac\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D7915\",\"dateCreated\":\"1579221124\",\"dateModified\":\"1579221145\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":2,\"lines.removed\":0},\"branch\":\"default\",\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-fu7z4h6aahgcq2h2q33b\",\"diffs\":[\"19391\",\"19390\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[[\"hgcm\",\"\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\"]],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":\"\\/\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "154"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B7915%2C+7916%5D%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:27 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "482"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22date%5C%22%3A+%5C%220+0%5C%22%2C+%5C%22node%5C%22%3A+%5C%22a692622e693757674f85ff481c7ff77057a7f82a%5C%22%2C+%5C%22parent%5C%22%3A+%5C%22c44b38f24a454b288ac6977d9d75f71df027994b%5C%22%2C+%5C%22user%5C%22%3A+%5C%22test%5C%22%7D%22%2C+%22diff_id%22%3A+19392%2C+%22name%22%3A+%22hg%3Ameta%22%7D&output=json&__conduit__=1"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "transfer-encoding": [
+                        "chunked"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "date": [
+                        "Fri, 17 Jan 2020 00:32:27 GMT"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "referrer-policy": [
+                        "no-referrer"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 5.2.2+620-6ee2ba170fe6+20200116)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-length": [
+                        "594"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "body": "params=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22data%22%3A+%22%7B%5C%22a692622e693757674f85ff481c7ff77057a7f82a%5C%22%3A+%7B%5C%22author%5C%22%3A+%5C%22test%5C%22%2C+%5C%22authorEmail%5C%22%3A+%5C%22test%5C%22%2C+%5C%22branch%5C%22%3A+%5C%22default%5C%22%2C+%5C%22commit%5C%22%3A+%5C%22a692622e693757674f85ff481c7ff77057a7f82a%5C%22%2C+%5C%22parents%5C%22%3A+%5B%5C%22c44b38f24a454b288ac6977d9d75f71df027994b%5C%22%5D%2C+%5C%22time%5C%22%3A+0%7D%7D%22%2C+%22diff_id%22%3A+19392%2C+%22name%22%3A+%22local%3Acommits%22%7D&output=json&__conduit__=1"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
--- a/tests/pullext.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/pullext.py	Tue Jan 21 13:14:51 2020 -0500
@@ -18,12 +18,12 @@
 
 
 def clonecommand(orig, ui, repo, *args, **kwargs):
-    if kwargs.get(r'include') or kwargs.get(r'exclude'):
-        kwargs[r'narrow'] = True
+    if kwargs.get('include') or kwargs.get('exclude'):
+        kwargs['narrow'] = True
 
-    if kwargs.get(r'depth'):
+    if kwargs.get('depth'):
         try:
-            kwargs[r'depth'] = int(kwargs[r'depth'])
+            kwargs['depth'] = int(kwargs['depth'])
         except ValueError:
             raise error.Abort(_('--depth must be an integer'))
 
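Dropping the r'' prefixes here is part of the Python 3 cleanup: keyword-argument names are always native str on Python 3, and the prefixes only mattered while Mercurial's py3 source transformer (since removed) rewrote bare string literals to bytes. A throwaway sketch (hypothetical function, not Mercurial code) of why plain string keys are the right spelling:

    def clone(**kwargs):
        # **kwargs keys arrive as native str, so plain 'include'/'depth'
        # literals index them correctly without any prefix.
        return sorted(kwargs)

    assert clone(include=1, depth=2) == ['depth', 'include']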
--- a/tests/run-tests.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/run-tests.py	Tue Jan 21 13:14:51 2020 -0500
@@ -1363,6 +1363,20 @@
         if PYTHON3 and os.name == 'nt':
             env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
 
+        # A modified HOME in the test environment can confuse Rust tools. So
+        # set CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain
+        # is present and these variables aren't already defined.
+        cargo_home_path = os.path.expanduser('~/.cargo')
+        rustup_home_path = os.path.expanduser('~/.rustup')
+
+        if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
+            env['CARGO_HOME'] = cargo_home_path
+        if (
+            os.path.exists(rustup_home_path)
+            and b'RUSTUP_HOME' not in osenvironb
+        ):
+            env['RUSTUP_HOME'] = rustup_home_path
+
         # Reset some environment variables to well-known values so that
         # the tests produce repeatable output.
         env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
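A standalone sketch of the fallback pattern the hunk above adds (hypothetical helper, not Mercurial's API; the real code consults the bytes-keyed osenvironb rather than os.environ):

    import os

    def rustenvfallback(env):
        # Point Cargo and rustup back at the real home directory, but only
        # when a toolchain actually lives there and the caller has not
        # already set the variable.
        for var, dotdir in (('CARGO_HOME', '~/.cargo'),
                            ('RUSTUP_HOME', '~/.rustup')):
            path = os.path.expanduser(dotdir)
            if os.path.exists(path) and var not in os.environ:
                env[var] = path
        return env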
@@ -1825,7 +1839,7 @@
                 cmd = rawcmd.split()
                 toggletrace(rawcmd)
                 if len(cmd) == 2 and cmd[0] == b'cd':
-                    l = b'  $ cd %s || exit 1\n' % cmd[1]
+                    rawcmd = b'cd %s || exit 1\n' % cmd[1]
                 script.append(rawcmd)
             elif l.startswith(b'  > '):  # continuations
                 after.setdefault(prepos, []).append(l)
@@ -1973,7 +1987,11 @@
     @staticmethod
     def rematch(el, l):
         try:
-            el = b'(?:' + el + b')'
+            # parse any flags at the beginning of the regex. Only 'i' is
+            # supported right now, but this should be easy to extend.
+            flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
+            flags = flags or b''
+            el = flags + b'(?:' + el + b')'
             # use \Z to ensure that the regex matches to the end of the string
             if os.name == 'nt':
                 return re.match(el + br'\r?\n\Z', l)
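As an aside, a minimal standalone sketch (plain Python; the helper name is
invented here) of the flag-prefix split that the rematch() hunk above
introduces:

    import re

    def split_flags(el):
        # Peel an optional leading "(?i)" flag group off a byte pattern,
        # mirroring the parsing added to rematch() above; everything else
        # is returned unchanged as the pattern body.
        flags, rest = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
        return (flags or b''), rest

    assert split_flags(b'(?i)hello .*') == (b'(?i)', b'hello .*')
    assert split_flags(b'hello .*') == (b'', b'hello .*')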
@@ -2246,27 +2264,31 @@
         # os.times module computes the user time and system time spent by
         # child's processes along with real elapsed time taken by a process.
         # This module has one limitation. It can only work for Linux user
-        # and not for Windows.
-        test.started = os.times()
+        # and not for Windows, which is why we fall back to another function
+        # for wall time calculations.
+        test.started_times = os.times()
+        # TODO use a monotonic clock once support for Python 2.7 is dropped.
+        test.started_time = time.time()
         if self._firststarttime is None:  # thread racy but irrelevant
-            self._firststarttime = test.started[4]
+            self._firststarttime = test.started_time
 
     def stopTest(self, test, interrupted=False):
         super(TestResult, self).stopTest(test)
 
-        test.stopped = os.times()
-
-        starttime = test.started
-        endtime = test.stopped
+        test.stopped_times = os.times()
+        stopped_time = time.time()
+
+        starttime = test.started_times
+        endtime = test.stopped_times
         origin = self._firststarttime
         self.times.append(
             (
                 test.name,
                 endtime[2] - starttime[2],  # user space CPU time
                 endtime[3] - starttime[3],  # sys  space CPU time
-                endtime[4] - starttime[4],  # real time
-                starttime[4] - origin,  # start date in run context
-                endtime[4] - origin,  # end date in run context
+                stopped_time - test.started_time,  # real time
+                test.started_time - origin,  # start date in run context
+                stopped_time - origin,  # end date in run context
             )
         )
 
@@ -3157,9 +3179,7 @@
                 expanded_args.append(arg)
         args = expanded_args
 
-        testcasepattern = re.compile(
-            br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-\.#]+))'
-        )
+        testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
         tests = []
         for t in args:
             case = []
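Stepping back from the diff for a moment, a small sketch (plain Python,
variable names invented here, standard library only) of the timing split that
the startTest/stopTest hunks above implement:

    import os
    import time

    # os.times() gives reliable CPU times, but per the comment above its
    # elapsed-time field is not usable on Windows, hence wall time comes
    # from time.time() (time.monotonic() once Python 2.7 is dropped).
    start_times = os.times()  # (user, sys, child_user, child_sys, elapsed)
    start_wall = time.time()

    os.system('true')  # stand-in for running a test in a child process

    end_times = os.times()
    child_user_cpu = end_times[2] - start_times[2]
    child_sys_cpu = end_times[3] - start_times[3]
    wall = time.time() - start_wall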
--- a/tests/simplestorerepo.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/simplestorerepo.py	Tue Jan 21 13:14:51 2020 -0500
@@ -48,9 +48,9 @@
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
-testedwith = 'ships-with-hg-core'
+testedwith = b'ships-with-hg-core'
 
-REQUIREMENT = 'testonly-simplestore'
+REQUIREMENT = b'testonly-simplestore'
 
 
 def validatenode(node):
@@ -204,7 +204,7 @@
             if entry[b'node'] == node:
                 return rev
 
-        raise error.ProgrammingError('this should not occur')
+        raise error.ProgrammingError(b'this should not occur')
 
     def node(self, rev):
         validaterev(rev)
--- a/tests/test-amend-subrepo.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-amend-subrepo.t	Tue Jan 21 13:14:51 2020 -0500
@@ -164,4 +164,35 @@
   R .hgsub
   R .hgsubstate
 
+broken repositories will refuse to push
+
+#if obsstore-off
+  $ hg up -q -C 2
+#else
+  $ hg up -q -C 6
+#endif
+  $ echo c >> t/b
+  $ hg amend -q -R t
+
+  $ hg init ../dest
+  $ hg init ../dest/t
+  $ hg init ../dest/s
+  $ hg push -q ../dest
+  abort: subrepo 't' is hidden in revision 04aa62396ec6 (obsstore-on !)
+  abort: subrepo 't' not found in revision 04aa62396ec6 (obsstore-off !)
+  [255]
+
+... unless forced
+
+  $ hg push --force -q ../dest
+  $ hg verify -R ../dest
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 5 changesets with 12 changes to 4 files
+  checking subrepo links
+  subrepo 't' not found in revision 04aa62396ec6
+  subrepo 't' not found in revision 6bce99600681
+
   $ cd ..
--- a/tests/test-amend.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-amend.t	Tue Jan 21 13:14:51 2020 -0500
@@ -129,7 +129,7 @@
 
   $ echo FOO > $TESTTMP/msg
   $ hg amend -l $TESTTMP/msg -m BAR
-  abort: options --message and --logfile are mutually exclusive
+  abort: cannot specify both --message and --logfile
   [255]
   $ hg amend -l $TESTTMP/msg
   saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/974f07f28537-edb6470a-amend.hg (obsstore-off !)
@@ -447,7 +447,7 @@
 Bad combination of date options:
 
   $ hg amend -D --date '0 0'
-  abort: --date and --currentdate are mutually exclusive
+  abort: cannot specify both --date and --currentdate
   [255]
 
 Close branch
@@ -476,3 +476,33 @@
    a |  2 +-
    b |  2 +-
    2 files changed, 2 insertions(+), 2 deletions(-)
+
+Modifying a file while the editor is open can cause dirstate corruption
+(issue6233)
+
+  $ cd $TESTTMP
+  $ hg init modify-during-amend; cd modify-during-amend
+  $ echo r0 > foo; hg commit -qAm "r0"
+  $ echo alpha > foo; hg commit -qm "alpha"
+  $ echo beta >> foo
+  $ cat > $TESTTMP/sleepy_editor.sh <<EOF
+  > echo hi > "\$1"
+  > sleep 3
+  > EOF
+  $ HGEDITOR="sh $TESTTMP/sleepy_editor.sh" hg commit --amend &
+  $ sleep 1
+  $ echo delta >> foo
+  $ sleep 3
+  $ if (hg diff -c . | grep 'delta' >/dev/null) || [ -n "$(hg status)" ]; then
+  >   echo "OK."
+  > else
+  >   echo "Bug detected. 'delta' is not part of the commit OR the wdir"
+  >   echo "Diff and status before rebuild:"
+  >   hg diff
+  >   hg status
+  >   hg debugrebuilddirstate
+  >   echo "Diff and status after rebuild:"
+  >   hg diff
+  >   hg status
+  > fi
+  OK.
--- a/tests/test-archive.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-archive.t	Tue Jan 21 13:14:51 2020 -0500
@@ -574,8 +574,8 @@
 
 #if py3
   $ hg archive ../archive.txz
-  $ xz -l ../archive.txz | head -n1
-  Strms  Blocks   Compressed Uncompressed  Ratio  Check   Filename
+  $ which xz >/dev/null && xz -l ../archive.txz | head -n1 || true
+  Strms  Blocks   Compressed Uncompressed  Ratio  Check   Filename (xz !)
   $ rm -f ../archive.txz
 #else
   $ hg archive ../archive.txz
--- a/tests/test-blackbox.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-blackbox.t	Tue Jan 21 13:14:51 2020 -0500
@@ -57,8 +57,15 @@
 
 unhandled exception
   $ rm ./.hg/blackbox.log
-  $ hg crash 2> /dev/null
+#if chg
+ (chg exits 255 because it fails to receive an exit code)
+  $ hg crash 2>/dev/null
+  [255]
+#else
+ (hg exits 1 because Python's default exit code for an uncaught exception is 1)
+  $ hg crash 2>/dev/null
   [1]
+#endif
   $ hg blackbox -l 2
   1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> crash exited 1 after * seconds (glob)
   1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox -l 2
@@ -390,7 +397,7 @@
   > from mercurial import registrar, scmutil
   > cmdtable = {}
   > command = registrar.command(cmdtable)
-  > @command('noop')
+  > @command(b'noop')
   > def noop(ui, repo):
   >     pass
   > EOF
--- a/tests/test-bookmarks.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-bookmarks.t	Tue Jan 21 13:14:51 2020 -0500
@@ -81,10 +81,10 @@
   abort: bookmark 'A' does not exist
   [255]
   $ hg bookmarks -l -r0
-  abort: --rev is incompatible with --list
+  abort: cannot specify both --list and --rev
   [255]
   $ hg bookmarks -l --inactive
-  abort: --inactive is incompatible with --list
+  abort: cannot specify both --inactive and --list
   [255]
 
   $ hg log -T '{bookmarks % "{rev} {bookmark}\n"}'
@@ -347,7 +347,7 @@
 delete with --inactive
 
   $ hg bookmark -d --inactive Y
-  abort: --inactive is incompatible with --delete
+  abort: cannot specify both --inactive and --delete
   [255]
 
 bookmark name with spaces should be stripped
@@ -475,15 +475,15 @@
   $ cd repo
 
   $ hg bookmark -m Y -d Z
-  abort: --delete and --rename are incompatible
+  abort: cannot specify both --delete and --rename
   [255]
 
   $ hg bookmark -r 1 -d Z
-  abort: --rev is incompatible with --delete
+  abort: cannot specify both --delete and --rev
   [255]
 
   $ hg bookmark -r 1 -m Z Y
-  abort: --rev is incompatible with --rename
+  abort: cannot specify both --rename and --rev
   [255]
 
 force bookmark with existing name
@@ -608,6 +608,27 @@
   $ hg bookmark --inactive Z
   $ hg bookmark Z
 
+deactivate current 'Z', but also add 'Y'
+
+  $ hg bookmark -d Y
+  $ hg bookmark --inactive Z Y
+  $ hg bookmark -l
+     X2                        1:925d80f479bb
+     Y                         2:db815d6d32e6
+     Z                         2:db815d6d32e6
+     x  y                      2:db815d6d32e6
+  $ hg bookmark Z
+
+bookmark wdir to activate it (issue6218)
+
+  $ hg bookmark -d Z
+  $ hg bookmark -r 'wdir()' Z
+  $ hg bookmark -l
+     X2                        1:925d80f479bb
+     Y                         2:db815d6d32e6
+   * Z                         2:db815d6d32e6
+     x  y                      2:db815d6d32e6
+
 test clone
 
   $ hg bookmark -r 2 -i @
--- a/tests/test-byteify-strings.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-byteify-strings.t	Tue Jan 21 13:14:51 2020 -0500
@@ -1,7 +1,7 @@
 #require py37
 
   $ byteify_strings () {
-  >   $PYTHON "$TESTDIR/../contrib/byteify-strings.py" "$@"
+  >   "$PYTHON" "$TESTDIR/../contrib/byteify-strings.py" "$@"
   > }
 
 Test version
--- a/tests/test-cbor.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-cbor.py	Tue Jan 21 13:14:51 2020 -0500
@@ -1230,7 +1230,7 @@
             True,
             False,
             None,
-            [None for i in range(128)],
+            [None] * 128,
         ]
 
         encoded = b''.join(cborutil.streamencode(source))
--- a/tests/test-censor.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-censor.t	Tue Jan 21 13:14:51 2020 -0500
@@ -442,6 +442,33 @@
   checking files
   checked 14 changesets with 15 changes to 2 files
 
+Grepping only warns, doesn't error out
+
+  $ cd ../rpull
+  $ hg grep 'Normal file'
+  bystander:Normal file v2
+  $ hg grep nothing
+  target:Re-sanitized; nothing to see here
+  $ hg grep --diff 'Normal file'
+  cannot search in censored file: target:7
+  cannot search in censored file: target:10
+  cannot search in censored file: target:12
+  bystander:6:-:Normal file v2
+  cannot search in censored file: target:1
+  cannot search in censored file: target:2
+  cannot search in censored file: target:3
+  bystander:2:-:Normal file here
+  bystander:2:+:Normal file v2
+  bystander:0:+:Normal file here
+  $ hg grep --diff nothing
+  cannot search in censored file: target:7
+  cannot search in censored file: target:10
+  cannot search in censored file: target:12
+  target:13:+:Re-sanitized; nothing to see here
+  cannot search in censored file: target:1
+  cannot search in censored file: target:2
+  cannot search in censored file: target:3
+
 Censored nodes can be imported on top of censored nodes, consecutively
 
   $ hg init ../rimport
--- a/tests/test-check-code.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-check-code.t	Tue Jan 21 13:14:51 2020 -0500
@@ -21,13 +21,14 @@
   Skipping contrib/automation/hgautomation/try_server.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/windows.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/winrm.py it has no-che?k-code (glob)
+  Skipping contrib/fuzz/FuzzedDataProvider.h it has no-che?k-code (glob)
+  Skipping contrib/fuzz/standalone_fuzz_target_runner.cc it has no-che?k-code (glob)
+  Skipping contrib/packaging/hgpackaging/cli.py it has no-che?k-code (glob)
   Skipping contrib/packaging/hgpackaging/downloads.py it has no-che?k-code (glob)
   Skipping contrib/packaging/hgpackaging/inno.py it has no-che?k-code (glob)
   Skipping contrib/packaging/hgpackaging/py2exe.py it has no-che?k-code (glob)
   Skipping contrib/packaging/hgpackaging/util.py it has no-che?k-code (glob)
   Skipping contrib/packaging/hgpackaging/wix.py it has no-che?k-code (glob)
-  Skipping contrib/packaging/inno/build.py it has no-che?k-code (glob)
-  Skipping contrib/packaging/wix/build.py it has no-che?k-code (glob)
   Skipping i18n/polib.py it has no-che?k-code (glob)
   Skipping mercurial/statprof.py it has no-che?k-code (glob)
   Skipping tests/badserverext.py it has no-che?k-code (glob)
@@ -84,3 +85,9 @@
 Keep python3 tests sorted:
   $ sort < contrib/python3-whitelist > $TESTTMP/py3sorted
   $ cmp contrib/python3-whitelist $TESTTMP/py3sorted || echo 'Please sort passing tests!'
+
+Keep Windows line endings in check
+
+  $ hg files 'set:eol(dos)'
+  contrib/win32/hg.bat
+  contrib/win32/mercurial.ini
--- a/tests/test-check-config.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-check-config.t	Tue Jan 21 13:14:51 2020 -0500
@@ -24,7 +24,7 @@
   > EOF
 
   $ cat > files << EOF
-  > mercurial/help/config.txt
+  > mercurial/helptext/config.txt
   > $TESTTMP/testfile.py
   > EOF
 
--- a/tests/test-check-format.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-check-format.t	Tue Jan 21 13:14:51 2020 -0500
@@ -1,5 +1,5 @@
 #require black
 
   $ cd $RUNTESTDIR/..
-  $ black --config=black.toml --check --diff `hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"'`
+  $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/** - "contrib/python-zstandard/**"'`
 
--- a/tests/test-check-py3-compat.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-check-py3-compat.t	Tue Jan 21 13:14:51 2020 -0500
@@ -8,6 +8,7 @@
   > -X contrib/automation/ \
   > -X contrib/packaging/hgpackaging/ \
   > -X contrib/packaging/inno/ \
+  > -X contrib/packaging/packaging.py \
   > -X contrib/packaging/wix/ \
   > -X hgdemandimport/demandimportpy2.py \
   > -X mercurial/thirdparty/cbor \
@@ -40,9 +41,9 @@
   hgext/infinitepush/sqlindexapi.py: error importing: <*Error> No module named 'mysql' (error at sqlindexapi.py:*) (glob) (?)
   mercurial/scmwindows.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
   mercurial/win32.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
-  mercurial/windows.py: error importing: <ModuleNotFoundError> No module named 'msvcrt' (error at windows.py:*) (no-windows !)
-  mercurial/posix.py: error importing: <ModuleNotFoundError> No module named 'fcntl' (error at posix.py:*) (windows !)
-  mercurial/scmposix.py: error importing: <ModuleNotFoundError> No module named 'fcntl' (error at scmposix.py:*) (windows !)
+  mercurial/windows.py: error importing: <*Error> No module named 'msvcrt' (error at windows.py:*) (glob) (no-windows !)
+  mercurial/posix.py: error importing: <*Error> No module named 'fcntl' (error at posix.py:*) (glob) (windows !)
+  mercurial/scmposix.py: error importing: <*Error> No module named 'fcntl' (error at scmposix.py:*) (glob) (windows !)
 #endif
 
 #if py3 pygments
--- a/tests/test-check-pyflakes.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-check-pyflakes.t	Tue Jan 21 13:14:51 2020 -0500
@@ -24,4 +24,5 @@
   contrib/perf.py:*: undefined name 'xrange' (glob) (?)
   mercurial/hgweb/server.py:*: undefined name 'reload' (glob) (?)
   mercurial/util.py:*: undefined name 'file' (glob) (?)
+  mercurial/encoding.py:*: undefined name 'localstr' (glob) (?)
   
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-check-rust-format.t	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,9 @@
+#require rustfmt test-repo
+
+  $ . "$TESTDIR/helpers-testrepo.sh"
+
+  $ cd "$TESTDIR"/..
+  $ RUSTFMT=$(rustup which --toolchain nightly rustfmt)
+  $ for f in `testrepohg files 'glob:**/*.rs'` ; do
+  >   $RUSTFMT --check --unstable-features --color=never $f
+  > done
--- a/tests/test-check-shbang.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-check-shbang.t	Tue Jan 21 13:14:51 2020 -0500
@@ -11,6 +11,7 @@
 In tests, enforce $PYTHON and *not* /usr/bin/env python or similar:
   $ testrepohg files 'set:grep(r"#!.*?python") and **/*.t' \
   > -X tests/test-check-execute.t \
+  > -X tests/test-check-format.t \
   > -X tests/test-check-module-imports.t \
   > -X tests/test-check-pyflakes.t \
   > -X tests/test-check-shbang.t
--- a/tests/test-chg.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-chg.t	Tue Jan 21 13:14:51 2020 -0500
@@ -229,11 +229,13 @@
   server.log.1
 
 print only the last 10 lines, since we aren't sure how many records are
-preserved:
+preserved (because setprocname isn't available on py3, the 10th-most-recent
+line is different when using py3):
 
   $ cat log/server.log.1 log/server.log | tail -10 | filterlog
+  YYYY/MM/DD HH:MM:SS (PID)> confighash = ... mtimehash = ... (py3 !)
   YYYY/MM/DD HH:MM:SS (PID)> forked worker process (pid=...)
-  YYYY/MM/DD HH:MM:SS (PID)> setprocname: ...
+  YYYY/MM/DD HH:MM:SS (PID)> setprocname: ... (no-py3 !)
   YYYY/MM/DD HH:MM:SS (PID)> received fds: ...
   YYYY/MM/DD HH:MM:SS (PID)> chdir to '$TESTTMP/extreload'
   YYYY/MM/DD HH:MM:SS (PID)> setumask 18
@@ -329,3 +331,25 @@
   YYYY/MM/DD HH:MM:SS (PID)> loaded repo into cache: $TESTTMP/cached2 (in  ...s)
   YYYY/MM/DD HH:MM:SS (PID)> log -R cached
   YYYY/MM/DD HH:MM:SS (PID)> loaded repo into cache: $TESTTMP/cached (in  ...s)
+
+Test that chg works even when Python "coerces" the locale (py3.7+, which is done
+by default if none of LC_ALL, LC_CTYPE, or LANG are set in the environment)
+
+  $ cat > $TESTTMP/debugenv.py <<EOF
+  > from mercurial import encoding
+  > from mercurial import registrar
+  > cmdtable = {}
+  > command = registrar.command(cmdtable)
+  > @command(b'debugenv', [], b'', norepo=True)
+  > def debugenv(ui):
+  >     for k in [b'LC_ALL', b'LC_CTYPE', b'LANG']:
+  >         v = encoding.environ.get(k)
+  >         if v is not None:
+  >             ui.write(b'%s=%s\n' % (k, encoding.environ[k]))
+  > EOF
+  $ LANG= LC_ALL= LC_CTYPE= chg \
+  >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv
+  LC_ALL=
+  LC_CTYPE=C.UTF-8 (py37 !)
+  LC_CTYPE= (no-py37 !)
+  LANG=
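For reference, a tiny sketch (hypothetical file name) of the PEP 538 coercion
that the test above exercises; with LC_ALL, LC_CTYPE and LANG unset or empty,
CPython 3.7+ coerces LC_CTYPE to a UTF-8 locale before main() runs, which is
what the (py37 !) output line reflects:

    # check_locale.py (hypothetical)
    import os

    # Expected to print something like 'C.UTF-8' on Python 3.7+ when run as:
    #   env -u LC_ALL -u LC_CTYPE -u LANG python3.7 check_locale.py
    print(os.environ.get('LC_CTYPE'))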
--- a/tests/test-commit-interactive-curses.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-commit-interactive-curses.t	Tue Jan 21 13:14:51 2020 -0500
@@ -95,7 +95,7 @@
 - unfold it
 - go down to second hunk (1 for the first hunk, 1 for the first hunkline, 1 for the second hunk, 1 for the second hunkline)
 - toggle the second hunk
-- toggle on and off the amend mode (to check that it toggles off)
+- toggle all lines twice (to check that it does nothing)
 - edit the hunk and quit the editor immediately with non-zero status
 - commit
 
@@ -193,20 +193,39 @@
   $ hg st
   ? testModeCommands
 
-Amend option works
+Test that toggling all selections works
+
+- Change one line
+- Add an extra line at the end
+- Unselect all
+- Select the extra line at the end
+- Toggle all selections (so the extra line at the end is unselected and the modified line is selected)
+- Commit
+
   $ echo "hello world" > x
-  $ hg diff -c .
-  diff -r a6735021574d -r 2b0e9be4d336 x
-  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  $ echo "goodbye world" >> x
+  $ hg diff
+  diff -r 2b0e9be4d336 x
+  --- a/x	Thu Jan 01 00:00:00 1970 +0000
   +++ b/x	Thu Jan 01 00:00:00 1970 +0000
-  @@ -0,0 +1,1 @@
-  +hello
+  @@ -1,1 +1,2 @@
+  -hello
+  +hello world
+  +goodbye world
   $ cat <<EOF >testModeCommands
+  > f
+  > j
+  > x
+  > j
+  > j
+  > j
+  > x
   > a
   > c
   > EOF
-  $ hg commit -i  -m "newly added file" -d "0 0"
+  $ hg commit -i --amend  -m "newly added file" -d "0 0" x
   saved backup bundle to $TESTTMP/a/.hg/strip-backup/2b0e9be4d336-3cf0bc8c-amend.hg
+  $ hg rev x --no-backup
   $ hg diff -c .
   diff -r a6735021574d -r c1d239d165ae x
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
--- a/tests/test-completion.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-completion.t	Tue Jan 21 13:14:51 2020 -0500
@@ -328,7 +328,7 @@
   heads: rev, topo, active, closed, style, template
   help: extension, command, keyword, system
   identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
-  import: strip, base, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
+  import: strip, base, secret, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
   incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
   init: ssh, remotecmd, insecure
   locate: rev, print0, fullpath, include, exclude
--- a/tests/test-config-env.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-config-env.py	Tue Jan 21 13:14:51 2020 -0500
@@ -6,6 +6,7 @@
 
 from mercurial import (
     encoding,
+    extensions,
     rcutil,
     ui as uimod,
     util,
@@ -35,9 +36,10 @@
     return [join(b'userrc')]
 
 
+extensions.wrapfunction(rcutil, 'default_rc_resources', lambda orig: [])
+
 rcutil.systemrcpath = systemrcpath
 rcutil.userrcpath = userrcpath
-os.path.isdir = lambda x: False  # hack: do not load default.d/*.rc
 
 # utility to print configs
 def printconfigs(env):
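A brief sketch of the wrapfunction() pattern used in the hunk above: the
wrapper receives the original callable as its first argument, and returning a
value without calling it suppresses the original, which is how the test now
avoids loading default.d/*.rc resources (names below match the diff):

    from mercurial import extensions, rcutil

    def _no_default_rc(orig):
        # A wrapper would normally delegate via orig(); here the test
        # deliberately skips it so no default resources are loaded.
        return []

    extensions.wrapfunction(rcutil, 'default_rc_resources', _no_default_rc)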
--- a/tests/test-conflict.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-conflict.t	Tue Jan 21 13:14:51 2020 -0500
@@ -63,12 +63,20 @@
   $ hg status -Tjson
   [
    {
+    "itemtype": "file",
     "path": "a",
-    "status": "M"
+    "status": "M",
+    "unresolved": true
    },
    {
+    "itemtype": "file",
     "path": "a.orig",
     "status": "?"
+   },
+   {
+    "itemtype": "morestatus",
+    "unfinished": "merge",
+    "unfinishedmsg": "To continue:    hg commit\nTo abort:       hg merge --abort"
    }
   ]
 
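For downstream consumers, a hedged sketch (assuming only the JSON shape shown
in the test above) of separating file entries from the new morestatus entry:

    import json
    import subprocess

    # `hg status -Tjson` entries now carry an "itemtype" field; file
    # entries may have "unresolved", and a trailing "morestatus" entry
    # describes any unfinished operation.
    entries = json.loads(subprocess.check_output(['hg', 'status', '-Tjson']))
    for e in entries:
        if e['itemtype'] == 'file':
            flag = ' (unresolved)' if e.get('unresolved') else ''
            print('%s %s%s' % (e['status'], e['path'], flag))
        elif e['itemtype'] == 'morestatus':
            print('unfinished operation: %s' % e['unfinished'])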
--- a/tests/test-contrib-perf.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-contrib-perf.t	Tue Jan 21 13:14:51 2020 -0500
@@ -248,6 +248,7 @@
   $ hg perfrevset 'all()'
   $ hg perfstartup
   $ hg perfstatus
+  $ hg perfstatus --dirstate
   $ hg perftags
   $ hg perftemplating
   $ hg perfvolatilesets
--- a/tests/test-demandimport.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-demandimport.py	Tue Jan 21 13:14:51 2020 -0500
@@ -22,6 +22,10 @@
 if sys.flags.optimize:
     sys.exit(80)
 
+# The demand importer doesn't work on Python 3.5.
+if sys.version_info[0:2] == (3, 5):
+    sys.exit(80)
+
 if ispy3:
     from importlib.util import _LazyModule
 
@@ -133,7 +137,7 @@
 from mercurial import hgweb
 
 if ispy3:
-    assert not isinstance(hgweb, _LazyModule)
+    assert isinstance(hgweb, _LazyModule)
     assert f(hgweb) == "<module 'mercurial.hgweb' from '?'>", f(hgweb)
     assert isinstance(hgweb.hgweb_mod, _LazyModule)
     assert (
@@ -206,7 +210,7 @@
 import telnetlib
 
 if ispy3:
-    assert not isinstance(telnetlib, _LazyModule)
+    assert isinstance(telnetlib, _LazyModule)
     assert f(telnetlib) == "<module 'telnetlib' from '?'>"
 else:
     assert f(telnetlib) == "<unloaded module 'telnetlib'>", f(telnetlib)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-dirs.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,27 @@
+from __future__ import absolute_import
+
+import unittest
+
+import silenttestrunner
+
+from mercurial import pathutil
+
+
+class dirstests(unittest.TestCase):
+    def testdirs(self):
+        for case, want in [
+            (b'a/a/a', [b'a', b'a/a', b'']),
+            (b'alpha/beta/gamma', [b'', b'alpha', b'alpha/beta']),
+        ]:
+            d = pathutil.dirs({})
+            d.addpath(case)
+            self.assertEqual(sorted(d), sorted(want))
+
+    def testinvalid(self):
+        with self.assertRaises(ValueError):
+            d = pathutil.dirs({})
+            d.addpath(b'a//b')
+
+
+if __name__ == '__main__':
+    silenttestrunner.main(__name__)
--- a/tests/test-docker-packaging.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-docker-packaging.t	Tue Jan 21 13:14:51 2020 -0500
@@ -10,7 +10,7 @@
   $ export OUTPUTDIR
 
   $ cd "$TESTDIR"/..
-  $ make docker-debian-jessie > $OUTPUTDIR/build.log 2>&1
+  $ make docker-debian-buster > $OUTPUTDIR/build.log 2>&1
   $ cd $OUTPUTDIR
   $ ls *.deb
   mercurial-common_*.deb (glob)
--- a/tests/test-doctest.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-doctest.py	Tue Jan 21 13:14:51 2020 -0500
@@ -82,6 +82,7 @@
 testmod('mercurial.url')
 testmod('mercurial.util')
 testmod('mercurial.util', testtarget='platform')
+testmod('mercurial.utils.dateutil')
 testmod('mercurial.utils.stringutil')
 testmod('hgext.convert.convcmd')
 testmod('hgext.convert.cvsps')
--- a/tests/test-extdiff.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-extdiff.t	Tue Jan 21 13:14:51 2020 -0500
@@ -515,3 +515,31 @@
   $ LC_MESSAGES=ja_JP.UTF-8 hg --config hgext.extdiff= --config extdiff.cmd.td=$U help td \
   > | grep "^      '"
         '\xa5\xa5'
+
+  $ cd $TESTTMP
+
+Test that diffing a single file works, even if that file is new
+
+  $ hg init testsinglefile
+  $ cd testsinglefile
+  $ echo a > a
+  $ hg add a
+  $ hg falabala
+  diffing nul "*\\a" (glob) (windows !)
+  diffing /dev/null */a (glob) (no-windows !)
+  [1]
+  $ hg ci -qm a
+  $ hg falabala -c .
+  diffing nul "*\\a" (glob) (windows !)
+  diffing /dev/null */a (glob) (no-windows !)
+  [1]
+  $ echo a >> a
+  $ hg falabala
+  diffing "*\\a" "*\\a" (glob) (windows !)
+  diffing */a */a (glob) (no-windows !)
+  [1]
+  $ hg ci -qm 2a
+  $ hg falabala -c .
+  diffing "*\\a" "*\\a" (glob) (windows !)
+  diffing */a */a (glob) (no-windows !)
+  [1]
--- a/tests/test-fileset.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-fileset.t	Tue Jan 21 13:14:51 2020 -0500
@@ -853,7 +853,7 @@
   M b2
   A 1k
   A 2k
-  A b2link (no-windows !)
+  A b2link (symlink !)
   A bin
   A c1
   A con.xml (no-windows !)
@@ -864,7 +864,7 @@
   M b2
   A 1k
   A 2k
-  A b2link (no-windows !)
+  A b2link (symlink !)
   A bin
   A c1
   A con.xml (no-windows !)
@@ -997,7 +997,7 @@
   A .hgsubstate
   A 1k
   A 2k
-  A b2link (no-windows !)
+  A b2link (symlink !)
   A bin
   A c1
   A con.xml (no-windows !)
@@ -1006,7 +1006,7 @@
   .hgsubstate
   1k
   2k
-  b2link (no-windows !)
+  b2link (symlink !)
   bin
   c1
   con.xml (no-windows !)
--- a/tests/test-fix.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-fix.t	Tue Jan 21 13:14:51 2020 -0500
@@ -264,10 +264,12 @@
   $ hg commit -Aqm "hello"
   $ hg phase -r 0 --public
   $ hg fix -r 0
-  abort: can't fix immutable changeset 0:6470986d2e7b
+  abort: cannot fix public changesets
+  (see 'hg help phases' for details)
   [255]
   $ hg fix -r 0 --working-dir
-  abort: can't fix immutable changeset 0:6470986d2e7b
+  abort: cannot fix public changesets
+  (see 'hg help phases' for details)
   [255]
   $ hg cat -r tip hello.whole
   hello
@@ -1171,7 +1173,7 @@
   $ printf "two\n" > foo.whole
   $ hg commit -m "second"
   $ hg --config experimental.evolution.allowunstable=False fix -r '.^'
-  abort: can only fix a changeset together with all its descendants
+  abort: cannot fix changeset with children
   [255]
   $ hg fix -r '.^'
   1 new orphan changesets
@@ -1301,10 +1303,13 @@
   > [fix]
   > printcwd:command = "$PYTHON" -c "import os; print(os.getcwd())"
   > printcwd:pattern = relpath:foo/bar
+  > filesetpwd:command = "$PYTHON" -c "import os; print('fs: ' + os.getcwd())"
+  > filesetpwd:pattern = set:**quux
   > EOF
 
   $ mkdir foo
   $ printf "bar\n" > foo/bar
+  $ printf "quux\n" > quux
   $ hg commit -Aqm blah
 
   $ hg fix -w -r . foo/bar
@@ -1316,15 +1321,35 @@
   $ cd foo
 
   $ hg fix -w -r . bar
-  $ hg cat -r tip bar
+  $ hg cat -r tip bar ../quux
   $TESTTMP/subprocesscwd
-  $ cat bar
+  quux
+  $ cat bar ../quux
   $TESTTMP/subprocesscwd
+  quux
   $ echo modified > bar
   $ hg fix -w bar
   $ cat bar
   $TESTTMP/subprocesscwd
 
+Apparently fixing p1() and its descendants doesn't include wdir() unless
+explicitly stated.
+
+  $ hg fix -r '.::'
+  $ hg cat -r . ../quux
+  quux
+  $ hg cat -r tip ../quux
+  fs: $TESTTMP/subprocesscwd
+  $ cat ../quux
+  quux
+
+Clean files are not fixed unless explicitly named
+  $ echo 'dirty' > ../quux
+
+  $ hg fix --working-dir
+  $ cat ../quux
+  fs: $TESTTMP/subprocesscwd
+
   $ cd ../..
 
 Tools configured without a pattern are ignored. It would be too dangerous to
@@ -1427,3 +1452,242 @@
   2 through 2
 
   $ cd ..
+
+Test various cases around merges. We were previously dropping files if they were
+created on only the p2 side of the merge, so let's test permutations of:
+*   added, was fixed
+*   added, considered for fixing but was already good
+*   added, not considered for fixing
+*   modified, was fixed
+*   modified, considered for fixing but was already good
+*   modified, not considered for fixing
+
+Before the file-dropping bug was fixed, this test demonstrated the
+following issues:
+*   new_in_r1.ignored, new_in_r1_already_good.changed, and
+>   mod_in_r1_already_good.changed were NOT in the manifest for the merge commit
+    mod_in_r1_already_good.changed were NOT in the manifest for the merge commit
+
+We're also setting a named branch for every commit to demonstrate that the
+branch is kept intact and there aren't issues updating to another branch in the
+middle of `hg fix`.
+
+  $ hg init merge_keeps_files
+  $ cd merge_keeps_files
+  $ for f in r0 mod_in_r1 mod_in_r2 mod_in_merge mod_in_child; do
+  >   for c in changed whole ignored; do
+  >     printf "hello\n" > $f.$c
+  >   done
+  >   printf "HELLO\n" > "mod_in_${f}_already_good.changed"
+  > done
+  $ hg branch -q r0
+  $ hg ci -Aqm 'r0'
+  $ hg phase -p
+  $ make_test_files() {
+  >   printf "world\n" >> "mod_in_$1.changed"
+  >   printf "world\n" >> "mod_in_$1.whole"
+  >   printf "world\n" >> "mod_in_$1.ignored"
+  >   printf "WORLD\n" >> "mod_in_$1_already_good.changed"
+  >   printf "new in $1\n" > "new_in_$1.changed"
+  >   printf "new in $1\n" > "new_in_$1.whole"
+  >   printf "new in $1\n" > "new_in_$1.ignored"
+  >   printf "ALREADY GOOD, NEW IN THIS REV\n" > "new_in_$1_already_good.changed"
+  > }
+  $ make_test_commit() {
+  >   make_test_files "$1"
+  >   hg branch -q "$1"
+  >   hg ci -Aqm "$2"
+  > }
+  $ make_test_commit r1 "merge me, pt1"
+  $ hg co -q ".^"
+  $ make_test_commit r2 "merge me, pt2"
+  $ hg merge -qr 1
+  $ make_test_commit merge "evil merge"
+  $ make_test_commit child "child of merge"
+  $ make_test_files wdir
+  $ hg fix -r 'not public()' -w
+  $ hg log -G -T'{rev}:{shortest(node,8)}: branch:{branch} desc:{desc}'
+  @  8:c22ce900: branch:child desc:child of merge
+  |
+  o    7:5a30615a: branch:merge desc:evil merge
+  |\
+  | o  6:4e5acdc4: branch:r2 desc:merge me, pt2
+  | |
+  o |  5:eea01878: branch:r1 desc:merge me, pt1
+  |/
+  o  0:0c548d87: branch:r0 desc:r0
+  
+  $ hg files -r tip
+  mod_in_child.changed
+  mod_in_child.ignored
+  mod_in_child.whole
+  mod_in_child_already_good.changed
+  mod_in_merge.changed
+  mod_in_merge.ignored
+  mod_in_merge.whole
+  mod_in_merge_already_good.changed
+  mod_in_mod_in_child_already_good.changed
+  mod_in_mod_in_merge_already_good.changed
+  mod_in_mod_in_r1_already_good.changed
+  mod_in_mod_in_r2_already_good.changed
+  mod_in_r0_already_good.changed
+  mod_in_r1.changed
+  mod_in_r1.ignored
+  mod_in_r1.whole
+  mod_in_r1_already_good.changed
+  mod_in_r2.changed
+  mod_in_r2.ignored
+  mod_in_r2.whole
+  mod_in_r2_already_good.changed
+  new_in_child.changed
+  new_in_child.ignored
+  new_in_child.whole
+  new_in_child_already_good.changed
+  new_in_merge.changed
+  new_in_merge.ignored
+  new_in_merge.whole
+  new_in_merge_already_good.changed
+  new_in_r1.changed
+  new_in_r1.ignored
+  new_in_r1.whole
+  new_in_r1_already_good.changed
+  new_in_r2.changed
+  new_in_r2.ignored
+  new_in_r2.whole
+  new_in_r2_already_good.changed
+  r0.changed
+  r0.ignored
+  r0.whole
+  $ for f in "$(hg files -r tip)"; do hg cat -r tip $f -T'{path}:\n{data}\n'; done
+  mod_in_child.changed:
+  hello
+  WORLD
+  
+  mod_in_child.ignored:
+  hello
+  world
+  
+  mod_in_child.whole:
+  HELLO
+  WORLD
+  
+  mod_in_child_already_good.changed:
+  WORLD
+  
+  mod_in_merge.changed:
+  hello
+  WORLD
+  
+  mod_in_merge.ignored:
+  hello
+  world
+  
+  mod_in_merge.whole:
+  HELLO
+  WORLD
+  
+  mod_in_merge_already_good.changed:
+  WORLD
+  
+  mod_in_mod_in_child_already_good.changed:
+  HELLO
+  
+  mod_in_mod_in_merge_already_good.changed:
+  HELLO
+  
+  mod_in_mod_in_r1_already_good.changed:
+  HELLO
+  
+  mod_in_mod_in_r2_already_good.changed:
+  HELLO
+  
+  mod_in_r0_already_good.changed:
+  HELLO
+  
+  mod_in_r1.changed:
+  hello
+  WORLD
+  
+  mod_in_r1.ignored:
+  hello
+  world
+  
+  mod_in_r1.whole:
+  HELLO
+  WORLD
+  
+  mod_in_r1_already_good.changed:
+  WORLD
+  
+  mod_in_r2.changed:
+  hello
+  WORLD
+  
+  mod_in_r2.ignored:
+  hello
+  world
+  
+  mod_in_r2.whole:
+  HELLO
+  WORLD
+  
+  mod_in_r2_already_good.changed:
+  WORLD
+  
+  new_in_child.changed:
+  NEW IN CHILD
+  
+  new_in_child.ignored:
+  new in child
+  
+  new_in_child.whole:
+  NEW IN CHILD
+  
+  new_in_child_already_good.changed:
+  ALREADY GOOD, NEW IN THIS REV
+  
+  new_in_merge.changed:
+  NEW IN MERGE
+  
+  new_in_merge.ignored:
+  new in merge
+  
+  new_in_merge.whole:
+  NEW IN MERGE
+  
+  new_in_merge_already_good.changed:
+  ALREADY GOOD, NEW IN THIS REV
+  
+  new_in_r1.changed:
+  NEW IN R1
+  
+  new_in_r1.ignored:
+  new in r1
+  
+  new_in_r1.whole:
+  NEW IN R1
+  
+  new_in_r1_already_good.changed:
+  ALREADY GOOD, NEW IN THIS REV
+  
+  new_in_r2.changed:
+  NEW IN R2
+  
+  new_in_r2.ignored:
+  new in r2
+  
+  new_in_r2.whole:
+  NEW IN R2
+  
+  new_in_r2_already_good.changed:
+  ALREADY GOOD, NEW IN THIS REV
+  
+  r0.changed:
+  hello
+  
+  r0.ignored:
+  hello
+  
+  r0.whole:
+  hello
+  
--- a/tests/test-flagprocessor.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-flagprocessor.t	Tue Jan 21 13:14:51 2020 -0500
@@ -204,7 +204,8 @@
     File "*/mercurial/extensions.py", line *, in _runextsetup (glob)
       extsetup(ui)
     File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
-      REVIDX_NOOP, (noopdonothingread, noopdonothing, validatehash,)
+      flagutil.addflagprocessor( (py38 !)
+      REVIDX_NOOP, (noopdonothingread, noopdonothing, validatehash,) (no-py38 !)
     File "*/mercurial/revlogutils/flagutil.py", line *, in addflagprocessor (glob)
       insertflagprocessor(flag, processor, flagprocessors)
     File "*/mercurial/revlogutils/flagutil.py", line *, in insertflagprocessor (glob)
--- a/tests/test-fuzz-targets.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-fuzz-targets.t	Tue Jan 21 13:14:51 2020 -0500
@@ -1,6 +1,7 @@
 #require test-repo
 
   $ cd $TESTDIR/../contrib/fuzz
+  $ OUT=$TESTTMP ; export OUT
 
 which(1) could exit nonzero, but that's fine because we'll still end
 up without a valid executable, so we don't need to check $? here.
@@ -27,20 +28,37 @@
 
 #if clang-libfuzzer
   $ CXX=clang++ havefuzz || exit 80
-  $ $MAKE -s clean all
+  $ $MAKE -s clean all PYTHON_CONFIG=`which python-config`
 #endif
 #if no-clang-libfuzzer clang-6.0
   $ CXX=clang++-6.0 havefuzz || exit 80
-  $ $MAKE -s clean all CC=clang-6.0 CXX=clang++-6.0
+  $ $MAKE -s clean all CC=clang-6.0 CXX=clang++-6.0 PYTHON_CONFIG=`which python-config`
 #endif
 #if no-clang-libfuzzer no-clang-6.0
   $ exit 80
 #endif
 
-Just run the fuzzers for five seconds each to verify it works at all.
-  $ ./bdiff -max_total_time 5
-  $ ./mpatch -max_total_time 5
-  $ ./xdiff -max_total_time 5
+  $ cd $TESTTMP
+
+Run each fuzzer using dummy.cc as a fake input, to make sure it runs
+at all. In the future we should unpack the corpus for each
+fuzzer and use that instead.
+
+  $ for fuzzer in `ls *_fuzzer | sort` ; do
+  >   echo run $fuzzer...
+  >   ./$fuzzer dummy.cc > /dev/null 2>&1
+  > done
+  run bdiff_fuzzer...
+  run dirs_fuzzer...
+  run dirstate_fuzzer...
+  run fm1readmarkers_fuzzer...
+  run fncache_fuzzer...
+  run jsonescapeu8fast_fuzzer...
+  run manifest_fuzzer...
+  run mpatch_fuzzer...
+  run revlog_fuzzer...
+  run xdiff_fuzzer...
 
 Clean up.
+  $ cd $TESTDIR/../contrib/fuzz
   $ $MAKE -s clean
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-graft-interrupted.t	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,771 @@
+#testcases abortcommand abortflag
+
+#if abortflag
+  $ cat >> $HGRCPATH <<EOF
+  > [alias]
+  > abort = graft --abort
+  > EOF
+#endif
+
+
+Testing the reading of an old-format graftstate file with newer Mercurial
+
+  $ hg init oldgraft
+  $ cd oldgraft
+  $ for ch in a b c; do echo foo > $ch; hg add $ch; hg ci -Aqm "added "$ch; done;
+  $ hg log -GT "{rev}:{node|short} {desc}\n"
+  @  2:8be98ac1a569 added c
+  |
+  o  1:80e6d2c47cfe added b
+  |
+  o  0:f7ad41964313 added a
+  
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ echo bar > b
+  $ hg add b
+  $ hg ci -m "bar to b"
+  created new head
+  $ hg graft -r 1 -r 2
+  grafting 1:80e6d2c47cfe "added b"
+  merging b
+  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+Writing the nodes in old format to graftstate
+
+  $ hg log -r 1 -r 2 -T '{node}\n' > .hg/graftstate
+  $ echo foo > b
+  $ hg resolve -m
+  (no more unresolved files)
+  continue: hg graft --continue
+  $ hg graft --continue
+  grafting 1:80e6d2c47cfe "added b"
+  grafting 2:8be98ac1a569 "added c"
+
+Testing that --user is preserved during conflicts and its value is reused while
+running `hg graft --continue`
+
+  $ hg log -G
+  @  changeset:   5:711e9fa999f1
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     added c
+  |
+  o  changeset:   4:e5ad7353b408
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     added b
+  |
+  o  changeset:   3:9e887f7a939c
+  |  parent:      0:f7ad41964313
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     bar to b
+  |
+  | o  changeset:   2:8be98ac1a569
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     added c
+  | |
+  | o  changeset:   1:80e6d2c47cfe
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     added b
+  |
+  o  changeset:   0:f7ad41964313
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     added a
+  
+
+  $ hg up '.^^'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+
+  $ hg graft -r 1 -r 2 --user batman
+  grafting 1:80e6d2c47cfe "added b"
+  merging b
+  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ echo wat > b
+  $ hg resolve -m
+  (no more unresolved files)
+  continue: hg graft --continue
+
+  $ hg graft --continue
+  grafting 1:80e6d2c47cfe "added b"
+  grafting 2:8be98ac1a569 "added c"
+
+  $ hg log -Gr 3::
+  @  changeset:   7:11a36ffaacf2
+  |  tag:         tip
+  |  user:        batman
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     added c
+  |
+  o  changeset:   6:76803afc6511
+  |  parent:      3:9e887f7a939c
+  |  user:        batman
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     added b
+  |
+  | o  changeset:   5:711e9fa999f1
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     added c
+  | |
+  | o  changeset:   4:e5ad7353b408
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     added b
+  |
+  o  changeset:   3:9e887f7a939c
+  |  parent:      0:f7ad41964313
+  ~  user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     bar to b
+  
+Test that --date is preserved and reused in `hg graft --continue`
+
+  $ hg up '.^^'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg graft -r 1 -r 2 --date '1234560000 120'
+  grafting 1:80e6d2c47cfe "added b"
+  merging b
+  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ echo foobar > b
+  $ hg resolve -m
+  (no more unresolved files)
+  continue: hg graft --continue
+  $ hg graft --continue
+  grafting 1:80e6d2c47cfe "added b"
+  grafting 2:8be98ac1a569 "added c"
+
+  $ hg log -Gr '.^^::.'
+  @  changeset:   9:1896b76e007a
+  |  tag:         tip
+  |  user:        test
+  |  date:        Fri Feb 13 21:18:00 2009 -0002
+  |  summary:     added c
+  |
+  o  changeset:   8:ce2b4f1632af
+  |  parent:      3:9e887f7a939c
+  |  user:        test
+  |  date:        Fri Feb 13 21:18:00 2009 -0002
+  |  summary:     added b
+  |
+  o  changeset:   3:9e887f7a939c
+  |  parent:      0:f7ad41964313
+  ~  user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     bar to b
+  
+Test that --log is preserved and reused in `hg graft --continue`
+
+  $ hg up '.^^'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg graft -r 1 -r 2 --log
+  grafting 1:80e6d2c47cfe "added b"
+  merging b
+  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ echo foobar > b
+  $ hg resolve -m
+  (no more unresolved files)
+  continue: hg graft --continue
+
+  $ hg graft --continue
+  grafting 1:80e6d2c47cfe "added b"
+  grafting 2:8be98ac1a569 "added c"
+
+  $ hg log -GT "{rev}:{node|short} {desc}" -r '.^^::.'
+  @  11:30c1050a58b2 added c
+  |  (grafted from 8be98ac1a56990c2d9ca6861041b8390af7bd6f3)
+  o  10:ec7eda2313e2 added b
+  |  (grafted from 80e6d2c47cfe5b3185519568327a17a061c7efb6)
+  o  3:9e887f7a939c bar to b
+  |
+  ~
+
+  $ cd ..
+
+Testing the --stop flag of `hg graft`, which stops the interrupted graft
+
+  $ hg init stopgraft
+  $ cd stopgraft
+  $ for ch in a b c d; do echo $ch > $ch; hg add $ch; hg ci -Aqm "added "$ch; done;
+
+  $ hg log -G
+  @  changeset:   3:9150fe93bec6
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     added d
+  |
+  o  changeset:   2:155349b645be
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     added c
+  |
+  o  changeset:   1:5f6d8a4bf34a
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     added b
+  |
+  o  changeset:   0:9092f1db7931
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     added a
+  
+  $ hg up '.^^'
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+
+  $ echo foo > d
+  $ hg ci -Aqm "added foo to d"
+
+  $ hg graft --stop
+  abort: no interrupted graft found
+  [255]
+
+  $ hg graft -r 3
+  grafting 3:9150fe93bec6 "added d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ hg graft --stop --continue
+  abort: cannot use '--continue' and '--stop' together
+  [255]
+
+  $ hg graft --stop -U
+  abort: cannot specify any other flag with '--stop'
+  [255]
+  $ hg graft --stop --rev 4
+  abort: cannot specify any other flag with '--stop'
+  [255]
+  $ hg graft --stop --log
+  abort: cannot specify any other flag with '--stop'
+  [255]
+
+  $ hg graft --stop
+  stopped the interrupted graft
+  working directory is now at a0deacecd59d
+
+  $ hg diff
+
+  $ hg log -Gr '.'
+  @  changeset:   4:a0deacecd59d
+  |  tag:         tip
+  ~  parent:      1:5f6d8a4bf34a
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     added foo to d
+  
+  $ hg graft -r 2 -r 3
+  grafting 2:155349b645be "added c"
+  grafting 3:9150fe93bec6 "added d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ hg graft --stop
+  stopped the interrupted graft
+  working directory is now at 75b447541a9e
+
+  $ hg diff
+
+  $ hg log -G -T "{rev}:{node|short} {desc}"
+  @  5:75b447541a9e added c
+  |
+  o  4:a0deacecd59d added foo to d
+  |
+  | o  3:9150fe93bec6 added d
+  | |
+  | o  2:155349b645be added c
+  |/
+  o  1:5f6d8a4bf34a added b
+  |
+  o  0:9092f1db7931 added a
+  
+  $ cd ..
+
+Testing the --abort flag for `hg graft`, which aborts the graft and rolls back
+to the state before the graft
+
+  $ hg init abortgraft
+  $ cd abortgraft
+  $ for ch in a b c d; do echo $ch > $ch; hg add $ch; hg ci -Aqm "added "$ch; done;
+
+  $ hg up '.^^'
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+
+  $ echo x > x
+  $ hg ci -Aqm "added x"
+  $ hg up '.^'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo foo > c
+  $ hg ci -Aqm "added foo to c"
+
+  $ hg log -GT "{rev}:{node|short} {desc}"
+  @  5:36b793615f78 added foo to c
+  |
+  | o  4:863a25e1a9ea added x
+  |/
+  | o  3:9150fe93bec6 added d
+  | |
+  | o  2:155349b645be added c
+  |/
+  o  1:5f6d8a4bf34a added b
+  |
+  o  0:9092f1db7931 added a
+  
+  $ hg up 9150fe93bec6
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ hg abort
+  abort: no interrupted graft to abort (abortflag !)
+  abort: no operation in progress (abortcommand !)
+  [255]
+
+when stripping is required
+  $ hg graft -r 4 -r 5
+  grafting 4:863a25e1a9ea "added x"
+  grafting 5:36b793615f78 "added foo to c" (tip)
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ hg graft --continue --abort
+  abort: cannot use '--continue' and '--abort' together
+  [255]
+
+  $ hg graft --abort --stop
+  abort: cannot use '--abort' and '--stop' together
+  [255]
+
+  $ hg graft --abort --currentuser
+  abort: cannot specify any other flag with '--abort'
+  [255]
+
+  $ hg graft --abort --edit
+  abort: cannot specify any other flag with '--abort'
+  [255]
+
+#if abortcommand
+when in dry-run mode
+  $ hg abort --dry-run
+  graft in progress, will be aborted
+#endif
+
+  $ hg abort
+  graft aborted
+  working directory is now at 9150fe93bec6
+  $ hg log -GT "{rev}:{node|short} {desc}"
+  o  5:36b793615f78 added foo to c
+  |
+  | o  4:863a25e1a9ea added x
+  |/
+  | @  3:9150fe93bec6 added d
+  | |
+  | o  2:155349b645be added c
+  |/
+  o  1:5f6d8a4bf34a added b
+  |
+  o  0:9092f1db7931 added a
+  
+when stripping is not required
+  $ hg graft -r 5
+  grafting 5:36b793615f78 "added foo to c" (tip)
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ hg abort
+  graft aborted
+  working directory is now at 9150fe93bec6
+  $ hg log -GT "{rev}:{node|short} {desc}"
+  o  5:36b793615f78 added foo to c
+  |
+  | o  4:863a25e1a9ea added x
+  |/
+  | @  3:9150fe93bec6 added d
+  | |
+  | o  2:155349b645be added c
+  |/
+  o  1:5f6d8a4bf34a added b
+  |
+  o  0:9092f1db7931 added a
+  
+when some of the changesets became public
+
+  $ hg graft -r 4 -r 5
+  grafting 4:863a25e1a9ea "added x"
+  grafting 5:36b793615f78 "added foo to c" (tip)
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ hg log -GT "{rev}:{node|short} {desc}"
+  @  6:6ec71c037d94 added x
+  |
+  | o  5:36b793615f78 added foo to c
+  | |
+  | | o  4:863a25e1a9ea added x
+  | |/
+  o |  3:9150fe93bec6 added d
+  | |
+  o |  2:155349b645be added c
+  |/
+  o  1:5f6d8a4bf34a added b
+  |
+  o  0:9092f1db7931 added a
+  
+  $ hg phase -r 6 --public
+
+  $ hg abort
+  cannot clean up public changesets 6ec71c037d94
+  graft aborted
+  working directory is now at 6ec71c037d94
+
+when we created new changesets on top of an existing one
+
+  $ hg up '.^^'
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ echo y > y
+  $ hg ci -Aqm "added y"
+  $ echo z > z
+  $ hg ci -Aqm "added z"
+
+  $ hg up 3
+  1 files updated, 0 files merged, 3 files removed, 0 files unresolved
+  $ hg log -GT "{rev}:{node|short} {desc}"
+  o  8:637f9e9bbfd4 added z
+  |
+  o  7:123221671fd4 added y
+  |
+  | o  6:6ec71c037d94 added x
+  | |
+  | | o  5:36b793615f78 added foo to c
+  | | |
+  | | | o  4:863a25e1a9ea added x
+  | | |/
+  | @ |  3:9150fe93bec6 added d
+  |/ /
+  o /  2:155349b645be added c
+  |/
+  o  1:5f6d8a4bf34a added b
+  |
+  o  0:9092f1db7931 added a
+  
+  $ hg graft -r 8 -r 7 -r 5
+  grafting 8:637f9e9bbfd4 "added z" (tip)
+  grafting 7:123221671fd4 "added y"
+  grafting 5:36b793615f78 "added foo to c"
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ cd ..
+  $ hg init pullrepo
+  $ cd pullrepo
+  $ cat >> .hg/hgrc <<EOF
+  > [phases]
+  > publish=False
+  > EOF
+  $ hg pull ../abortgraft --config phases.publish=False
+  pulling from ../abortgraft
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 11 changesets with 9 changes to 8 files (+4 heads)
+  new changesets 9092f1db7931:6b98ff0062dd (6 drafts)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+  $ hg up 9
+  5 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo w > w
+  $ hg ci -Aqm "added w" --config phases.publish=False
+
+  $ cd ../abortgraft
+  $ hg pull ../pullrepo
+  pulling from ../pullrepo
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  new changesets 311dfc6cf3bf (1 drafts)
+  (run 'hg heads .' to see heads, 'hg merge' to merge)
+
+  $ hg abort
+  new changesets detected on destination branch, can't strip
+  graft aborted
+  working directory is now at 6b98ff0062dd
+
+  $ cd ..
+
+============================
+Testing --no-commit option:|
+============================
+
+  $ hg init nocommit
+  $ cd nocommit
+  $ echo a > a
+  $ hg ci -qAma
+  $ echo b > b
+  $ hg ci -qAmb
+  $ hg up -q 0
+  $ echo c > c
+  $ hg ci -qAmc
+  $ hg log -GT "{rev}:{node|short} {desc}\n"
+  @  2:d36c0562f908 c
+  |
+  | o  1:d2ae7f538514 b
+  |/
+  o  0:cb9a9f314b8b a
+  
+
+Check reporting when --no-commit is used with non-applicable options:
+
+  $ hg graft 1 --no-commit -e
+  abort: cannot specify --no-commit and --edit together
+  [255]
+
+  $ hg graft 1 --no-commit --log
+  abort: cannot specify --no-commit and --log together
+  [255]
+
+  $ hg graft 1 --no-commit -D
+  abort: cannot specify --no-commit and --currentdate together
+  [255]
+
+Test that --no-commit works:
+  $ hg graft 1 --no-commit
+  grafting 1:d2ae7f538514 "b"
+
+  $ hg log -GT "{rev}:{node|short} {desc}\n"
+  @  2:d36c0562f908 c
+  |
+  | o  1:d2ae7f538514 b
+  |/
+  o  0:cb9a9f314b8b a
+  
+
+  $ hg diff
+  diff -r d36c0562f908 b
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/b	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +b
+
+Prepare the wdir to check that --no-commit is respected after --continue:
+
+  $ hg up -qC
+  $ echo A>a
+  $ hg ci -qm "A in file a"
+  $ hg up -q 1
+  $ echo B>a
+  $ hg ci -qm "B in file a"
+  $ hg log -GT "{rev}:{node|short} {desc}\n"
+  @  4:2aa9ad1006ff B in file a
+  |
+  | o  3:09e253b87e17 A in file a
+  | |
+  | o  2:d36c0562f908 c
+  | |
+  o |  1:d2ae7f538514 b
+  |/
+  o  0:cb9a9f314b8b a
+  
+
+  $ hg graft 3 --no-commit
+  grafting 3:09e253b87e17 "A in file a"
+  merging a
+  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+Resolve conflict:
+  $ echo A>a
+  $ hg resolve --mark
+  (no more unresolved files)
+  continue: hg graft --continue
+
+  $ hg graft --continue
+  grafting 3:09e253b87e17 "A in file a"
+  $ hg log -GT "{rev}:{node|short} {desc}\n"
+  @  4:2aa9ad1006ff B in file a
+  |
+  | o  3:09e253b87e17 A in file a
+  | |
+  | o  2:d36c0562f908 c
+  | |
+  o |  1:d2ae7f538514 b
+  |/
+  o  0:cb9a9f314b8b a
+  
+  $ hg diff
+  diff -r 2aa9ad1006ff a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,1 +1,1 @@
+  -B
+  +A
+
+  $ hg up -qC
+
+Check that --no-commit is respected when passed with --continue:
+
+  $ hg graft 3
+  grafting 3:09e253b87e17 "A in file a"
+  merging a
+  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+Resolve conflict:
+  $ echo A>a
+  $ hg resolve --mark
+  (no more unresolved files)
+  continue: hg graft --continue
+
+  $ hg graft --continue --no-commit
+  grafting 3:09e253b87e17 "A in file a"
+  $ hg diff
+  diff -r 2aa9ad1006ff a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,1 +1,1 @@
+  -B
+  +A
+
+  $ hg log -GT "{rev}:{node|short} {desc}\n"
+  @  4:2aa9ad1006ff B in file a
+  |
+  | o  3:09e253b87e17 A in file a
+  | |
+  | o  2:d36c0562f908 c
+  | |
+  o |  1:d2ae7f538514 b
+  |/
+  o  0:cb9a9f314b8b a
+  
+  $ hg up -qC
+
+Test --no-commit when grafting multiple revisions:
+When there is a conflict:
+  $ hg graft -r "2::3" --no-commit
+  grafting 2:d36c0562f908 "c"
+  grafting 3:09e253b87e17 "A in file a"
+  merging a
+  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+  abort: unresolved conflicts, can't continue
+  (use 'hg resolve' and 'hg graft --continue')
+  [255]
+
+  $ echo A>a
+  $ hg resolve --mark
+  (no more unresolved files)
+  continue: hg graft --continue
+  $ hg graft --continue
+  grafting 3:09e253b87e17 "A in file a"
+  $ hg diff
+  diff -r 2aa9ad1006ff a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,1 +1,1 @@
+  -B
+  +A
+  diff -r 2aa9ad1006ff c
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +c
+
+  $ hg log -GT "{rev}:{node|short} {desc}\n"
+  @  4:2aa9ad1006ff B in file a
+  |
+  | o  3:09e253b87e17 A in file a
+  | |
+  | o  2:d36c0562f908 c
+  | |
+  o |  1:d2ae7f538514 b
+  |/
+  o  0:cb9a9f314b8b a
+  
+  $ hg up -qC
+
+When there is no conflict:
+  $ echo d>d
+  $ hg add d -q
+  $ hg ci -qmd
+  $ hg up 3 -q
+  $ hg log -GT "{rev}:{node|short} {desc}\n"
+  o  5:baefa8927fc0 d
+  |
+  o  4:2aa9ad1006ff B in file a
+  |
+  | @  3:09e253b87e17 A in file a
+  | |
+  | o  2:d36c0562f908 c
+  | |
+  o |  1:d2ae7f538514 b
+  |/
+  o  0:cb9a9f314b8b a
+  
+
+  $ hg graft -r 1 -r 5 --no-commit
+  grafting 1:d2ae7f538514 "b"
+  grafting 5:baefa8927fc0 "d" (tip)
+  $ hg diff
+  diff -r 09e253b87e17 b
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/b	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +b
+  diff -r 09e253b87e17 d
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/d	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +d
+  $ hg log -GT "{rev}:{node|short} {desc}\n"
+  o  5:baefa8927fc0 d
+  |
+  o  4:2aa9ad1006ff B in file a
+  |
+  | @  3:09e253b87e17 A in file a
+  | |
+  | o  2:d36c0562f908 c
+  | |
+  o |  1:d2ae7f538514 b
+  |/
+  o  0:cb9a9f314b8b a
+  
+  $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-graft-rename.t	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,724 @@
+
+Graft from behind a move or rename
+==================================
+
+NOTE: This is affected by issue5343, and will need updating when it's fixed
+
+Consider this topology for a regular graft:
+
+o c1
+|
+| o c2
+| |
+| o ca # stands for "common ancestor"
+|/
+o cta # stands for "common topological ancestor"
+
+Note that in issue5343, ca==cta.
+
+The following table shows the possible cases. Here, "x->y" and, equivalently,
+"y<-x", where x is an ancestor of y, means that some copy happened from x to y.
+
+name | c1<-cta | cta<->ca | ca->c2
+A.0  |         |          |
+A.1  |    X    |          |
+A.2  |         |     X    |
+A.3  |         |          |   X
+A.4  |    X    |     X    |
+A.5  |    X    |          |   X
+A.6  |         |     X    |   X
+A.7  |    X    |     X    |   X
+
+A.0 is trivial, and doesn't need copy tracking.
+For A.1, a forward rename is recorded in the c1 pass, to be followed later.
+In A.2, the rename is recorded in the c2 pass and followed backwards.
+A.3 is recorded in the c2 pass as a forward rename to be duplicated on target.
+In A.4, both passes of checkcopies record incomplete renames, which are
+then joined in mergecopies to record a rename to be followed.
+In A.5 and A.7, the c1 pass records an incomplete rename, while the c2 pass
+records an incomplete divergence. The incomplete rename is then joined to the
+appropriate side of the incomplete divergence, and the result is recorded as a
+divergence. The code doesn't distinguish at all between these two cases, since
+their end result is the same: an incomplete divergence joined with an
+incomplete rename into a divergence.
+Finally, A.6 records a divergence entirely in the c2 pass.
+
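+The joining step described above can be pictured with a small sketch (plain
+Python; the dicts are illustrative stand-ins, not the real checkcopies or
+mergecopies data structures):
+
+  incomplete_rename = {'f1e': 'f1a'}        # c1 pass: dst -> src, other half missing
+  incomplete_divergence = {'f1a': ['f1b']}  # c2 pass: src -> dsts, one side missing
+  divergence = {src: sorted(incomplete_divergence[src] + [dst])
+                for dst, src in incomplete_rename.items()
+                if src in incomplete_divergence}
+  assert divergence == {'f1a': ['f1b', 'f1e']}
+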
+A.4 has a degenerate case a<-b<-a->a, where checkcopies isn't needed at all.
+A.5 has a special case a<-b<-b->a, which is treated like a<-b->a in a merge.
+A.5 has issue5343 as a special case.
+A.6 has a special case a<-a<-b->a. Here, checkcopies will find a spurious
+incomplete divergence, which is in fact complete. This is handled later in
+mergecopies.
+A.7 has 4 special cases: a<-b<-a->b (the "ping-pong" case), a<-b<-c->b,
+a<-b<-a->c and a<-b<-c->a. Of these, only the "ping-pong" case is interesting,
+the others are fairly trivial (a<-b<-c->b and a<-b<-a->c proceed like the base
+case, a<-b<-c->a is treated the same as a<-b<-b->a).
+
+f5a therefore tests the "ping-pong" rename case, where a file is renamed to the
+same name on both branches, then the rename is backed out on one branch, and
+the backout is grafted to the other branch. This creates a challenging rename
+sequence of a<-b<-a->b in the graft target, topological CA, graft CA and graft
+source, respectively. Since rename detection will run on the c1 side for such a
+sequence (because, for technical reasons, we split the c1 and c2 sides not at the
+graft CA, but rather at the topological CA), it will pick up a false rename,
+and cause a spurious merge conflict. This false rename is always exactly the
+reverse of the true rename that would be detected on the c2 side, so we can
+correct for it by detecting this condition and reversing as necessary.
+
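+The reversal itself is trivial once the condition is detected; a minimal
+sketch of that final step (plain Python, illustrative names only; the
+detection logic in the real code is more involved):
+
+  false_rename = {'f5b': 'f5a'}  # picked up on the c1 side (dst -> src)
+  true_rename = {src: dst for dst, src in false_rename.items()}  # reversed
+  assert true_rename == {'f5a': 'f5b'}
+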
+First, set up the repository with commits to be grafted
+
+  $ hg init graftmove
+  $ cd graftmove
+  $ echo c1a > f1a
+  $ echo c2a > f2a
+  $ echo c3a > f3a
+  $ echo c4a > f4a
+  $ echo c5a > f5a
+  $ hg ci -qAm A0
+  $ hg mv f1a f1b
+  $ hg mv f3a f3b
+  $ hg mv f5a f5b
+  $ hg ci -qAm B0
+  $ echo c1c > f1b
+  $ hg mv f2a f2c
+  $ hg mv f5b f5a
+  $ echo c5c > f5a
+  $ hg ci -qAm C0
+  $ hg mv f3b f3d
+  $ echo c4d > f4a
+  $ hg ci -qAm D0
+  $ hg log -G
+  @  changeset:   3:b69f5839d2d9
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     D0
+  |
+  o  changeset:   2:f58c7e2b28fa
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     C0
+  |
+  o  changeset:   1:3d7bba921b5d
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     B0
+  |
+  o  changeset:   0:11f7a1b56675
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     A0
+  
+
+Test the cases A.2 (f1x), A.3 (f2x) and a special case of A.6 (f5x) where the
+two renames actually converge to the same name (thus no actual divergence).
+
+  $ hg up -q 'desc("A0")'
+  $ HGEDITOR="echo C1 >" hg graft -r 'desc("C0")' --edit
+  grafting 2:f58c7e2b28fa "C0"
+  merging f1a and f1b to f1a
+  merging f5a
+  $ hg status --change .
+  M f1a
+  M f5a
+  A f2c
+  R f2a
+  $ hg cat f1a
+  c1c
+  $ hg cat f1b
+  f1b: no such file in rev c9763722f9bd
+  [1]
+
+Test the cases A.0 (f4x) and A.6 (f3x)
+
+  $ HGEDITOR="echo D1 >" hg graft -r 'desc("D0")' --edit
+  grafting 3:b69f5839d2d9 "D0"
+  note: possible conflict - f3b was renamed multiple times to:
+   f3a
+   f3d
+
+Set up the repository for some further tests
+
+  $ hg up -q "min(desc("A0"))"
+  $ hg mv f1a f1e
+  $ echo c2e > f2a
+  $ hg mv f3a f3e
+  $ hg mv f4a f4e
+  $ hg mv f5a f5b
+  $ hg ci -qAm "E0"
+  $ hg up -q "min(desc("A0"))"
+  $ hg cp f1a f1f
+  $ hg ci -qAm "F0"
+  $ hg up -q "min(desc("A0"))"
+  $ hg cp f1a f1g
+  $ echo c1g > f1g
+  $ hg ci -qAm "G0"
+  $ hg log -G
+  @  changeset:   8:ba67f08fb15a
+  |  tag:         tip
+  |  parent:      0:11f7a1b56675
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     G0
+  |
+  | o  changeset:   7:d376ab0d7fda
+  |/   parent:      0:11f7a1b56675
+  |    user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     F0
+  |
+  | o  changeset:   6:6bd1736cab86
+  |/   parent:      0:11f7a1b56675
+  |    user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     E0
+  |
+  | o  changeset:   5:560daee679da
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     D1
+  | |
+  | o  changeset:   4:c9763722f9bd
+  |/   parent:      0:11f7a1b56675
+  |    user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     C1
+  |
+  | o  changeset:   3:b69f5839d2d9
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     D0
+  | |
+  | o  changeset:   2:f58c7e2b28fa
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     C0
+  | |
+  | o  changeset:   1:3d7bba921b5d
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     B0
+  |
+  o  changeset:   0:11f7a1b56675
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     A0
+  
+
+Test the cases A.4 (f1x), the "ping-pong" special case of A.7 (f5x),
+and A.3 with a local content change to be preserved (f2x).
+
+  $ hg up -q "desc("E0")"
+  $ HGEDITOR="echo C2 >" hg graft -r 'desc("C0")' --edit
+  grafting 2:f58c7e2b28fa "C0"
+  merging f1e and f1b to f1e
+  merging f2a and f2c to f2c
+
+Test the cases A.1 (f4x) and A.7 (f3x).
+
+  $ HGEDITOR="echo D2 >" hg graft -r 'desc("D0")' --edit
+  grafting 3:b69f5839d2d9 "D0"
+  note: possible conflict - f3b was renamed multiple times to:
+   f3d
+   f3e
+  merging f4e and f4a to f4e
+
+  $ hg cat f2c
+  c2e
+
+Test the case A.5 (move case, f1x).
+
+  $ hg up -q "desc("C0")"
+  $ HGEDITOR="echo E1 >" hg graft -r 'desc("E0")' --edit
+  grafting 6:6bd1736cab86 "E0"
+  note: possible conflict - f1a was renamed multiple times to:
+   f1b
+   f1e
+  note: possible conflict - f3a was renamed multiple times to:
+   f3b
+   f3e
+  merging f2c and f2a to f2c
+  merging f5a and f5b to f5b
+  $ cat f1e
+  c1a
+
+Test the case A.5 (copy case, f1x).
+
+  $ hg up -q "desc("C0")"
+  $ HGEDITOR="echo F1 >" hg graft -r 'desc("F0")' --edit
+  grafting 7:d376ab0d7fda "F0"
+BROKEN: f1f should be marked a copy from f1b
+  $ hg st --copies --change .
+  A f1f
+BROKEN: f1f should have the new content from f1b (i.e. "c1c")
+  $ cat f1f
+  c1a
+
+Test the case A.5 (copy+modify case, f1x).
+
+  $ hg up -q "desc("C0")"
+BROKEN: We should get a merge conflict from the 3-way merge between f1b in C0
+(content "c1c") and f1g in G0 (content "c1g") with f1a in A0 as base (content
+"c1a")
+  $ HGEDITOR="echo G1 >" hg graft -r 'desc("G0")' --edit
+  grafting 8:ba67f08fb15a "G0"
+
+Check the results of the grafts tested
+
+  $ hg log -CGv --patch --git
+  @  changeset:   13:ef3adf6c20a4
+  |  tag:         tip
+  |  parent:      2:f58c7e2b28fa
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  files:       f1g
+  |  description:
+  |  G1
+  |
+  |
+  |  diff --git a/f1g b/f1g
+  |  new file mode 100644
+  |  --- /dev/null
+  |  +++ b/f1g
+  |  @@ -0,0 +1,1 @@
+  |  +c1g
+  |
+  | o  changeset:   12:b5542d755b54
+  |/   parent:      2:f58c7e2b28fa
+  |    user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    files:       f1f
+  |    description:
+  |    F1
+  |
+  |
+  |    diff --git a/f1f b/f1f
+  |    new file mode 100644
+  |    --- /dev/null
+  |    +++ b/f1f
+  |    @@ -0,0 +1,1 @@
+  |    +c1a
+  |
+  | o  changeset:   11:f8a162271246
+  |/   parent:      2:f58c7e2b28fa
+  |    user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    files:       f1e f2c f3e f4a f4e f5a f5b
+  |    copies:      f4e (f4a) f5b (f5a)
+  |    description:
+  |    E1
+  |
+  |
+  |    diff --git a/f1e b/f1e
+  |    new file mode 100644
+  |    --- /dev/null
+  |    +++ b/f1e
+  |    @@ -0,0 +1,1 @@
+  |    +c1a
+  |    diff --git a/f2c b/f2c
+  |    --- a/f2c
+  |    +++ b/f2c
+  |    @@ -1,1 +1,1 @@
+  |    -c2a
+  |    +c2e
+  |    diff --git a/f3e b/f3e
+  |    new file mode 100644
+  |    --- /dev/null
+  |    +++ b/f3e
+  |    @@ -0,0 +1,1 @@
+  |    +c3a
+  |    diff --git a/f4a b/f4e
+  |    rename from f4a
+  |    rename to f4e
+  |    diff --git a/f5a b/f5b
+  |    rename from f5a
+  |    rename to f5b
+  |
+  | o  changeset:   10:93ee502e8b0a
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  files:       f3d f4e
+  | |  description:
+  | |  D2
+  | |
+  | |
+  | |  diff --git a/f3d b/f3d
+  | |  new file mode 100644
+  | |  --- /dev/null
+  | |  +++ b/f3d
+  | |  @@ -0,0 +1,1 @@
+  | |  +c3a
+  | |  diff --git a/f4e b/f4e
+  | |  --- a/f4e
+  | |  +++ b/f4e
+  | |  @@ -1,1 +1,1 @@
+  | |  -c4a
+  | |  +c4d
+  | |
+  | o  changeset:   9:539cf145f496
+  | |  parent:      6:6bd1736cab86
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  files:       f1e f2a f2c f5a f5b
+  | |  copies:      f2c (f2a) f5a (f5b)
+  | |  description:
+  | |  C2
+  | |
+  | |
+  | |  diff --git a/f1e b/f1e
+  | |  --- a/f1e
+  | |  +++ b/f1e
+  | |  @@ -1,1 +1,1 @@
+  | |  -c1a
+  | |  +c1c
+  | |  diff --git a/f2a b/f2c
+  | |  rename from f2a
+  | |  rename to f2c
+  | |  diff --git a/f5b b/f5a
+  | |  rename from f5b
+  | |  rename to f5a
+  | |  --- a/f5b
+  | |  +++ b/f5a
+  | |  @@ -1,1 +1,1 @@
+  | |  -c5a
+  | |  +c5c
+  | |
+  | | o  changeset:   8:ba67f08fb15a
+  | | |  parent:      0:11f7a1b56675
+  | | |  user:        test
+  | | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | | |  files:       f1g
+  | | |  copies:      f1g (f1a)
+  | | |  description:
+  | | |  G0
+  | | |
+  | | |
+  | | |  diff --git a/f1a b/f1g
+  | | |  copy from f1a
+  | | |  copy to f1g
+  | | |  --- a/f1a
+  | | |  +++ b/f1g
+  | | |  @@ -1,1 +1,1 @@
+  | | |  -c1a
+  | | |  +c1g
+  | | |
+  | | | o  changeset:   7:d376ab0d7fda
+  | | |/   parent:      0:11f7a1b56675
+  | | |    user:        test
+  | | |    date:        Thu Jan 01 00:00:00 1970 +0000
+  | | |    files:       f1f
+  | | |    copies:      f1f (f1a)
+  | | |    description:
+  | | |    F0
+  | | |
+  | | |
+  | | |    diff --git a/f1a b/f1f
+  | | |    copy from f1a
+  | | |    copy to f1f
+  | | |
+  | o |  changeset:   6:6bd1736cab86
+  | |/   parent:      0:11f7a1b56675
+  | |    user:        test
+  | |    date:        Thu Jan 01 00:00:00 1970 +0000
+  | |    files:       f1a f1e f2a f3a f3e f4a f4e f5a f5b
+  | |    copies:      f1e (f1a) f3e (f3a) f4e (f4a) f5b (f5a)
+  | |    description:
+  | |    E0
+  | |
+  | |
+  | |    diff --git a/f1a b/f1e
+  | |    rename from f1a
+  | |    rename to f1e
+  | |    diff --git a/f2a b/f2a
+  | |    --- a/f2a
+  | |    +++ b/f2a
+  | |    @@ -1,1 +1,1 @@
+  | |    -c2a
+  | |    +c2e
+  | |    diff --git a/f3a b/f3e
+  | |    rename from f3a
+  | |    rename to f3e
+  | |    diff --git a/f4a b/f4e
+  | |    rename from f4a
+  | |    rename to f4e
+  | |    diff --git a/f5a b/f5b
+  | |    rename from f5a
+  | |    rename to f5b
+  | |
+  | | o  changeset:   5:560daee679da
+  | | |  user:        test
+  | | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | | |  files:       f3d f4a
+  | | |  description:
+  | | |  D1
+  | | |
+  | | |
+  | | |  diff --git a/f3d b/f3d
+  | | |  new file mode 100644
+  | | |  --- /dev/null
+  | | |  +++ b/f3d
+  | | |  @@ -0,0 +1,1 @@
+  | | |  +c3a
+  | | |  diff --git a/f4a b/f4a
+  | | |  --- a/f4a
+  | | |  +++ b/f4a
+  | | |  @@ -1,1 +1,1 @@
+  | | |  -c4a
+  | | |  +c4d
+  | | |
+  | | o  changeset:   4:c9763722f9bd
+  | |/   parent:      0:11f7a1b56675
+  | |    user:        test
+  | |    date:        Thu Jan 01 00:00:00 1970 +0000
+  | |    files:       f1a f2a f2c f5a
+  | |    copies:      f2c (f2a)
+  | |    description:
+  | |    C1
+  | |
+  | |
+  | |    diff --git a/f1a b/f1a
+  | |    --- a/f1a
+  | |    +++ b/f1a
+  | |    @@ -1,1 +1,1 @@
+  | |    -c1a
+  | |    +c1c
+  | |    diff --git a/f2a b/f2c
+  | |    rename from f2a
+  | |    rename to f2c
+  | |    diff --git a/f5a b/f5a
+  | |    --- a/f5a
+  | |    +++ b/f5a
+  | |    @@ -1,1 +1,1 @@
+  | |    -c5a
+  | |    +c5c
+  | |
+  +---o  changeset:   3:b69f5839d2d9
+  | |    user:        test
+  | |    date:        Thu Jan 01 00:00:00 1970 +0000
+  | |    files:       f3b f3d f4a
+  | |    copies:      f3d (f3b)
+  | |    description:
+  | |    D0
+  | |
+  | |
+  | |    diff --git a/f3b b/f3d
+  | |    rename from f3b
+  | |    rename to f3d
+  | |    diff --git a/f4a b/f4a
+  | |    --- a/f4a
+  | |    +++ b/f4a
+  | |    @@ -1,1 +1,1 @@
+  | |    -c4a
+  | |    +c4d
+  | |
+  o |  changeset:   2:f58c7e2b28fa
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  files:       f1b f2a f2c f5a f5b
+  | |  copies:      f2c (f2a) f5a (f5b)
+  | |  description:
+  | |  C0
+  | |
+  | |
+  | |  diff --git a/f1b b/f1b
+  | |  --- a/f1b
+  | |  +++ b/f1b
+  | |  @@ -1,1 +1,1 @@
+  | |  -c1a
+  | |  +c1c
+  | |  diff --git a/f2a b/f2c
+  | |  rename from f2a
+  | |  rename to f2c
+  | |  diff --git a/f5b b/f5a
+  | |  rename from f5b
+  | |  rename to f5a
+  | |  --- a/f5b
+  | |  +++ b/f5a
+  | |  @@ -1,1 +1,1 @@
+  | |  -c5a
+  | |  +c5c
+  | |
+  o |  changeset:   1:3d7bba921b5d
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    files:       f1a f1b f3a f3b f5a f5b
+  |    copies:      f1b (f1a) f3b (f3a) f5b (f5a)
+  |    description:
+  |    B0
+  |
+  |
+  |    diff --git a/f1a b/f1b
+  |    rename from f1a
+  |    rename to f1b
+  |    diff --git a/f3a b/f3b
+  |    rename from f3a
+  |    rename to f3b
+  |    diff --git a/f5a b/f5b
+  |    rename from f5a
+  |    rename to f5b
+  |
+  o  changeset:   0:11f7a1b56675
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     files:       f1a f2a f3a f4a f5a
+     description:
+     A0
+  
+  
+     diff --git a/f1a b/f1a
+     new file mode 100644
+     --- /dev/null
+     +++ b/f1a
+     @@ -0,0 +1,1 @@
+     +c1a
+     diff --git a/f2a b/f2a
+     new file mode 100644
+     --- /dev/null
+     +++ b/f2a
+     @@ -0,0 +1,1 @@
+     +c2a
+     diff --git a/f3a b/f3a
+     new file mode 100644
+     --- /dev/null
+     +++ b/f3a
+     @@ -0,0 +1,1 @@
+     +c3a
+     diff --git a/f4a b/f4a
+     new file mode 100644
+     --- /dev/null
+     +++ b/f4a
+     @@ -0,0 +1,1 @@
+     +c4a
+     diff --git a/f5a b/f5a
+     new file mode 100644
+     --- /dev/null
+     +++ b/f5a
+     @@ -0,0 +1,1 @@
+     +c5a
+  
+Check superfluous filemerge of files renamed in the past but untouched by graft
+
+  $ echo a > a
+  $ hg ci -qAma
+  $ hg mv a b
+  $ echo b > b
+  $ hg ci -qAmb
+  $ echo c > c
+  $ hg ci -qAmc
+  $ hg up -q .~2
+  $ hg graft tip -qt:fail
+
+  $ cd ..
+
+Graft a change into a new file previously grafted into a renamed directory
+
+  $ hg init dirmovenewfile
+  $ cd dirmovenewfile
+  $ mkdir a
+  $ echo a > a/a
+  $ hg ci -qAma
+  $ echo x > a/x
+  $ hg ci -qAmx
+  $ hg up -q 0
+  $ hg mv -q a b
+  $ hg ci -qAmb
+  $ hg graft -q 1 # a/x grafted as b/x, but no copy information recorded
+  $ hg up -q 1
+  $ echo y > a/x
+  $ hg ci -qAmy
+  $ hg up -q 3
+  $ hg graft -q 4
+  $ hg status --change .
+  M b/x
+
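+The directory-move handling exercised above amounts to a prefix remap; a
+hedged sketch (plain Python; 'dirmove' is an illustrative stand-in for the
+internal directory-rename tracking, not a real API):
+
+  dirmove = {'a/': 'b/'}  # directory renamed on the destination side
+  def remap(path):
+      for old, new in dirmove.items():
+          if path.startswith(old):
+              return new + path[len(old):]
+      return path
+  assert remap('a/x') == 'b/x'  # a new file follows the directory move
+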
+Prepare for test of skipped changesets and how merges can influence it:
+
+  $ hg merge -q -r 1 --tool :local
+  $ hg ci -m m
+  $ echo xx >> b/x
+  $ hg ci -m xx
+
+  $ hg log -G -T '{rev} {desc|firstline}'
+  @  7 xx
+  |
+  o    6 m
+  |\
+  | o  5 y
+  | |
+  +---o  4 y
+  | |
+  | o  3 x
+  | |
+  | o  2 b
+  | |
+  o |  1 x
+  |/
+  o  0 a
+  
+Grafting of plain changes correctly detects that 3 and 5 should be skipped:
+
+  $ hg up -qCr 4
+  $ hg graft --tool :local -r 2::5
+  skipping already grafted revision 3:ca093ca2f1d9 (was grafted from 1:13ec5badbf2a)
+  skipping already grafted revision 5:43e9eb70dab0 (was grafted from 4:6c9a1289e5f1)
+  grafting 2:42127f193bcd "b"
+
+Extending the graft range to include a (skipped) merge of 3 will not prevent us from
+also detecting that both 3 and 5 should be skipped:
+
+  $ hg up -qCr 4
+  $ hg graft --tool :local -r 2::7
+  skipping ungraftable merge revision 6
+  skipping already grafted revision 3:ca093ca2f1d9 (was grafted from 1:13ec5badbf2a)
+  skipping already grafted revision 5:43e9eb70dab0 (was grafted from 4:6c9a1289e5f1)
+  grafting 2:42127f193bcd "b"
+  grafting 7:d3c3f2b38ecc "xx"
+  note: graft of 7:d3c3f2b38ecc created no changes to commit
+
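+What the two runs above check can be condensed into a filter over the
+requested range (plain Python; the sets are illustrative, not graft's real
+bookkeeping):
+
+  requested = [2, 3, 5, 6, 7]
+  merges = {6}              # ungraftable merge revisions
+  already_grafted = {3, 5}  # revisions whose source is already present
+  targets = [r for r in requested
+             if r not in merges and r not in already_grafted]
+  assert targets == [2, 7]
+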
+  $ cd ..
+
+A grafted revision should be warned about and skipped only once. (issue6024)
+
+  $ mkdir issue6024
+  $ cd issue6024
+
+  $ hg init base
+  $ cd base
+  $ touch x
+  $ hg commit -qAminit
+  $ echo a > x
+  $ hg commit -mchange
+  $ hg update -q 0
+  $ hg graft -r 1
+  grafting 1:a0b923c546aa "change" (tip)
+  $ cd ..
+
+  $ hg clone -qr 2 base clone
+  $ cd clone
+  $ hg pull -q
+  $ hg merge -q 2
+  $ hg commit -mmerge
+  $ hg update -q 0
+  $ hg graft -r 1
+  grafting 1:04fc6d444368 "change"
+  $ hg update -q 3
+  $ hg log -G -T '{rev}:{node|shortest} <- {extras.source|shortest}\n'
+  o  4:4e16 <- a0b9
+  |
+  | @    3:f0ac <-
+  | |\
+  +---o  2:a0b9 <-
+  | |
+  | o  1:04fc <- a0b9
+  |/
+  o  0:7848 <-
+  
+
+ The source of rev 4 is an ancestor of the working parent, and was also
+ grafted as rev 1. It should be stripped from the target revisions only once.
+
+  $ hg graft -r 4
+  skipping already grafted revision 4:4e16bab40c9c (1:04fc6d444368 also has origin 2:a0b923c546aa)
+  [255]
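+
+A sketch of the only-once behaviour (plain Python; the mapping stands in for
+the 'source' extra and is illustrative, not graft's real data structures):
+a revision is dropped from the targets the first time its origin is found,
+not once per matching changeset:
+
+  origin = {4: 'a0b9', 1: 'a0b9'}  # rev -> recorded graft source
+  present = {1}                    # grafted revisions already on this side
+  targets = [4]
+  for rev in list(targets):
+      if any(origin.get(p) == origin[rev] for p in present):
+          targets.remove(rev)      # removed once, hence warned once
+  assert targets == []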
+
+  $ cd ../..
--- a/tests/test-graft.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-graft.t	Tue Jan 21 13:14:51 2020 -0500
@@ -1,18 +1,9 @@
-#testcases abortcommand abortflag
-
   $ cat >> $HGRCPATH <<EOF
   > [extdiff]
   > # for portability:
   > pdiff = sh "$RUNTESTDIR/pdiff"
   > EOF
 
-#if abortflag
-  $ cat >> $HGRCPATH <<EOF
-  > [alias]
-  > abort = graft --abort
-  > EOF
-#endif
-
 Create a repo with some stuff in it:
 
   $ hg init a
@@ -127,10 +118,10 @@
 
   $ hg up -q 0
   $ hg graft -U --user foo 2
-  abort: --user and --currentuser are mutually exclusive
+  abort: cannot specify both --user and --currentuser
   [255]
   $ hg graft -D --date '0 0' 2
-  abort: --date and --currentdate are mutually exclusive
+  abort: cannot specify both --date and --currentdate
   [255]
 
 Can't graft with dirty wd:
@@ -502,7 +493,6 @@
   $ hg up -Cq 1
   $ hg graft 3 --log -u foo
   grafting 3:4c60f11aa304 "3"
-  warning: can't find ancestor for 'c' copied from 'b'!
   $ hg log --template '{rev}:{node|short} {parents} {desc}\n' -r tip
   14:0c921c65ef1e 1:5d205f8b35b6  3
   (grafted from 4c60f11aa304a54ae1c199feb94e7fc771e51ed8)
@@ -762,12 +752,7 @@
    branchmerge: True, force: True, partial: False
    ancestor: b592ea63bb0c, local: 7e61b508e709+, remote: 7a4785234d87
   starting 4 threads for background file closing (?)
-  committing files:
-  b
-  warning: can't find ancestor for 'b' copied from 'a'!
-  reusing manifest from p1 (listed files actually unchanged)
-  committing changelog
-  updating the branch cache
+  note: graft of 13:7a4785234d87 created no changes to commit
   $ hg log -r 'destination(13)'
 All copies of a cset
   $ hg log -r 'origin(13) or destination(origin(13))'
@@ -794,58 +779,46 @@
   
   changeset:   21:7e61b508e709
   branch:      dev
-  user:        foo
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     2
-  
-  changeset:   22:3a4e92d81b97
-  branch:      dev
   tag:         tip
   user:        foo
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     2
   
 
-graft works on complex revset
+graft skips ancestors
 
-  $ hg graft 'origin(13) or destination(origin(13))'
+  $ hg graft 21 3
   skipping ancestor revision 21:7e61b508e709
-  skipping ancestor revision 22:3a4e92d81b97
-  skipping revision 2:5c095ad7e90f (already grafted to 22:3a4e92d81b97)
-  grafting 7:ef0ef43d49e7 "2"
-  warning: can't find ancestor for 'b' copied from 'a'!
-  grafting 13:7a4785234d87 "2"
-  warning: can't find ancestor for 'b' copied from 'a'!
-  grafting 19:9627f653b421 "2"
-  merging b
-  warning: can't find ancestor for 'b' copied from 'a'!
+  grafting 3:4c60f11aa304 "3"
+  merging b and c to c
 
 graft with --force (still doesn't graft merges)
 
   $ hg graft 19 0 6
   skipping ungraftable merge revision 6
   skipping ancestor revision 0:68795b066622
-  skipping already grafted revision 19:9627f653b421 (22:3a4e92d81b97 also has origin 2:5c095ad7e90f)
-  [255]
+  grafting 19:9627f653b421 "2"
+  merging b
+  note: graft of 19:9627f653b421 created no changes to commit
   $ hg graft 19 0 6 --force
   skipping ungraftable merge revision 6
   grafting 19:9627f653b421 "2"
   merging b
-  warning: can't find ancestor for 'b' copied from 'a'!
+  note: graft of 19:9627f653b421 created no changes to commit
   grafting 0:68795b066622 "0"
 
 graft --force after backout
 
   $ echo abc > a
-  $ hg ci -m 28
-  $ hg backout 28
+  $ hg ci -m 24
+  $ hg backout 24
   reverting a
-  changeset 29:9d95e865b00c backs out changeset 28:cc20d29aec8d
-  $ hg graft 28
-  skipping ancestor revision 28:cc20d29aec8d
+  changeset 25:71c4e63d4f98 backs out changeset 24:2e7ea477be26
+  $ hg graft 24
+  skipping ancestor revision 24:2e7ea477be26
   [255]
-  $ hg graft 28 --force
-  grafting 28:cc20d29aec8d "28"
+  $ hg graft 24 --force
+  grafting 24:2e7ea477be26 "24"
   merging a
   $ cat a
   abc
@@ -853,9 +826,9 @@
 graft --continue after --force
 
   $ echo def > a
-  $ hg ci -m 31
-  $ hg graft 28 --force --tool internal:fail
-  grafting 28:cc20d29aec8d "28"
+  $ hg ci -m 27
+  $ hg graft 24 --force --tool internal:fail
+  grafting 24:2e7ea477be26 "24"
   abort: unresolved conflicts, can't continue
   (use 'hg resolve' and 'hg graft --continue')
   [255]
@@ -868,7 +841,7 @@
   (no more unresolved files)
   continue: hg graft --continue
   $ hg graft -c
-  grafting 28:cc20d29aec8d "28"
+  grafting 24:2e7ea477be26 "24"
   $ cat a
   abc
 
@@ -885,12 +858,12 @@
 
 Empty graft
 
-  $ hg up -qr 26
+  $ hg up -qr 22
   $ hg tag -f something
-  $ hg graft -qr 27
-  $ hg graft -f 27
-  grafting 27:17d42b8f5d50 "28"
-  note: graft of 27:17d42b8f5d50 created no changes to commit
+  $ hg graft -qr 23
+  $ hg graft -f 23
+  grafting 23:72d9c7c75bcc "24"
+  note: graft of 23:72d9c7c75bcc created no changes to commit
 
   $ cd ..
 
@@ -931,1497 +904,3 @@
   |/
   o  0
   
-Graft from behind a move or rename
-==================================
-
-NOTE: This is affected by issue5343, and will need updating when it's fixed
-
-Consider this topology for a regular graft:
-
-o c1
-|
-| o c2
-| |
-| o ca # stands for "common ancestor"
-|/
-o cta # stands for "common topological ancestor"
-
-Note that in issue5343, ca==cta.
-
-The following table shows the possible cases. Here, "x->y" and, equivalently,
-"y<-x", where x is an ancestor of y, means that some copy happened from x to y.
-
-name | c1<-cta | cta<->ca | ca->c2
-A.0  |         |          |
-A.1  |    X    |          |
-A.2  |         |     X    |
-A.3  |         |          |   X
-A.4  |    X    |     X    |
-A.5  |    X    |          |   X
-A.6  |         |     X    |   X
-A.7  |    X    |     X    |   X
-
-A.0 is trivial, and doesn't need copy tracking.
-For A.1, a forward rename is recorded in the c1 pass, to be followed later.
-In A.2, the rename is recorded in the c2 pass and followed backwards.
-A.3 is recorded in the c2 pass as a forward rename to be duplicated on target.
-In A.4, both passes of checkcopies record incomplete renames, which are
-then joined in mergecopies to record a rename to be followed.
-In A.5 and A.7, the c1 pass records an incomplete rename, while the c2 pass
-records an incomplete divergence. The incomplete rename is then joined to the
-appropriate side of the incomplete divergence, and the result is recorded as a
-divergence. The code doesn't distinguish at all between these two cases, since
-their end result is the same: an incomplete divergence joined with an
-incomplete rename into a divergence.
-Finally, A.6 records a divergence entirely in the c2 pass.
-
-A.4 has a degenerate case a<-b<-a->a, where checkcopies isn't needed at all.
-A.5 has a special case a<-b<-b->a, which is treated like a<-b->a in a merge.
-A.5 has issue5343 as a special case.
-A.6 has a special case a<-a<-b->a. Here, checkcopies will find a spurious
-incomplete divergence, which is in fact complete. This is handled later in
-mergecopies.
-A.7 has 4 special cases: a<-b<-a->b (the "ping-pong" case), a<-b<-c->b,
-a<-b<-a->c and a<-b<-c->a. Of these, only the "ping-pong" case is interesting,
-the others are fairly trivial (a<-b<-c->b and a<-b<-a->c proceed like the base
-case, a<-b<-c->a is treated the same as a<-b<-b->a).
-
-f5a therefore tests the "ping-pong" rename case, where a file is renamed to the
-same name on both branches, then the rename is backed out on one branch, and
-the backout is grafted to the other branch. This creates a challenging rename
-sequence of a<-b<-a->b in the graft target, topological CA, graft CA and graft
-source, respectively. Since rename detection will run on the c1 side for such a
-sequence (because, for technical reasons, we split the c1 and c2 sides not at the
-graft CA, but rather at the topological CA), it will pick up a false rename,
-and cause a spurious merge conflict. This false rename is always exactly the
-reverse of the true rename that would be detected on the c2 side, so we can
-correct for it by detecting this condition and reversing as necessary.
-
-First, set up the repository with commits to be grafted
-
-  $ hg init ../graftmove
-  $ cd ../graftmove
-  $ echo c1a > f1a
-  $ echo c2a > f2a
-  $ echo c3a > f3a
-  $ echo c4a > f4a
-  $ echo c5a > f5a
-  $ hg ci -qAm A0
-  $ hg mv f1a f1b
-  $ hg mv f3a f3b
-  $ hg mv f5a f5b
-  $ hg ci -qAm B0
-  $ echo c1c > f1b
-  $ hg mv f2a f2c
-  $ hg mv f5b f5a
-  $ echo c5c > f5a
-  $ hg ci -qAm C0
-  $ hg mv f3b f3d
-  $ echo c4d > f4a
-  $ hg ci -qAm D0
-  $ hg log -G
-  @  changeset:   3:b69f5839d2d9
-  |  tag:         tip
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     D0
-  |
-  o  changeset:   2:f58c7e2b28fa
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     C0
-  |
-  o  changeset:   1:3d7bba921b5d
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     B0
-  |
-  o  changeset:   0:11f7a1b56675
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     A0
-  
-
-Test the cases A.2 (f1x), A.3 (f2x) and a special case of A.6 (f5x) where the
-two renames actually converge to the same name (thus no actual divergence).
-
-  $ hg up -q 'desc("A0")'
-  $ HGEDITOR="echo C1 >" hg graft -r 'desc("C0")' --edit
-  grafting 2:f58c7e2b28fa "C0"
-  merging f1a and f1b to f1a
-  merging f5a
-  warning: can't find ancestor for 'f5a' copied from 'f5b'!
-  $ hg status --change .
-  M f1a
-  M f5a
-  A f2c
-  R f2a
-  $ hg cat f1a
-  c1c
-  $ hg cat f1b
-  f1b: no such file in rev c9763722f9bd
-  [1]
-
-Test the cases A.0 (f4x) and A.6 (f3x)
-
-  $ HGEDITOR="echo D1 >" hg graft -r 'desc("D0")' --edit
-  grafting 3:b69f5839d2d9 "D0"
-  note: possible conflict - f3b was renamed multiple times to:
-   f3a
-   f3d
-  warning: can't find ancestor for 'f3d' copied from 'f3b'!
-
-Set up the repository for some further tests
-
-  $ hg up -q "min(desc("A0"))"
-  $ hg mv f1a f1e
-  $ echo c2e > f2a
-  $ hg mv f3a f3e
-  $ hg mv f4a f4e
-  $ hg mv f5a f5b
-  $ hg ci -qAm "E0"
-  $ hg up -q "min(desc("A0"))"
-  $ hg cp f1a f1f
-  $ hg ci -qAm "F0"
-  $ hg up -q "min(desc("A0"))"
-  $ hg cp f1a f1g
-  $ echo c1g > f1g
-  $ hg ci -qAm "G0"
-  $ hg log -G
-  @  changeset:   8:ba67f08fb15a
-  |  tag:         tip
-  |  parent:      0:11f7a1b56675
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     G0
-  |
-  | o  changeset:   7:d376ab0d7fda
-  |/   parent:      0:11f7a1b56675
-  |    user:        test
-  |    date:        Thu Jan 01 00:00:00 1970 +0000
-  |    summary:     F0
-  |
-  | o  changeset:   6:6bd1736cab86
-  |/   parent:      0:11f7a1b56675
-  |    user:        test
-  |    date:        Thu Jan 01 00:00:00 1970 +0000
-  |    summary:     E0
-  |
-  | o  changeset:   5:560daee679da
-  | |  user:        test
-  | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | |  summary:     D1
-  | |
-  | o  changeset:   4:c9763722f9bd
-  |/   parent:      0:11f7a1b56675
-  |    user:        test
-  |    date:        Thu Jan 01 00:00:00 1970 +0000
-  |    summary:     C1
-  |
-  | o  changeset:   3:b69f5839d2d9
-  | |  user:        test
-  | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | |  summary:     D0
-  | |
-  | o  changeset:   2:f58c7e2b28fa
-  | |  user:        test
-  | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | |  summary:     C0
-  | |
-  | o  changeset:   1:3d7bba921b5d
-  |/   user:        test
-  |    date:        Thu Jan 01 00:00:00 1970 +0000
-  |    summary:     B0
-  |
-  o  changeset:   0:11f7a1b56675
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     A0
-  
-
-Test the cases A.4 (f1x), the "ping-pong" special case of A.7 (f5x),
-and A.3 with a local content change to be preserved (f2x).
-
-  $ hg up -q "desc("E0")"
-  $ HGEDITOR="echo C2 >" hg graft -r 'desc("C0")' --edit
-  grafting 2:f58c7e2b28fa "C0"
-  merging f1e and f1b to f1e
-  merging f2a and f2c to f2c
-
-Test the cases A.1 (f4x) and A.7 (f3x).
-
-  $ HGEDITOR="echo D2 >" hg graft -r 'desc("D0")' --edit
-  grafting 3:b69f5839d2d9 "D0"
-  note: possible conflict - f3b was renamed multiple times to:
-   f3d
-   f3e
-  merging f4e and f4a to f4e
-  warning: can't find ancestor for 'f3d' copied from 'f3b'!
-
-  $ hg cat f2c
-  c2e
-
-Test the case A.5 (move case, f1x).
-
-  $ hg up -q "desc("C0")"
-BROKEN: Shouldn't get the warning about missing ancestor
-  $ HGEDITOR="echo E1 >" hg graft -r 'desc("E0")' --edit
-  grafting 6:6bd1736cab86 "E0"
-  note: possible conflict - f1a was renamed multiple times to:
-   f1b
-   f1e
-  note: possible conflict - f3a was renamed multiple times to:
-   f3b
-   f3e
-  merging f2c and f2a to f2c
-  merging f5a and f5b to f5b
-  warning: can't find ancestor for 'f1e' copied from 'f1a'!
-  warning: can't find ancestor for 'f3e' copied from 'f3a'!
-  $ cat f1e
-  c1a
-
-Test the case A.5 (copy case, f1x).
-
-  $ hg up -q "desc("C0")"
-BROKEN: Shouldn't get the warning about missing ancestor
-  $ HGEDITOR="echo F1 >" hg graft -r 'desc("F0")' --edit
-  grafting 7:d376ab0d7fda "F0"
-  warning: can't find ancestor for 'f1f' copied from 'f1a'!
-BROKEN: f1f should be marked a copy from f1b
-  $ hg st --copies --change .
-  A f1f
-BROKEN: f1f should have the new content from f1b (i.e. "c1c")
-  $ cat f1f
-  c1a
-
-Test the case A.5 (copy+modify case, f1x).
-
-  $ hg up -q "desc("C0")"
-BROKEN: We should get a merge conflict from the 3-way merge between f1b in C0
-(content "c1c") and f1g in G0 (content "c1g") with f1a in A0 as base (content
-"c1a")
-  $ HGEDITOR="echo G1 >" hg graft -r 'desc("G0")' --edit
-  grafting 8:ba67f08fb15a "G0"
-  warning: can't find ancestor for 'f1g' copied from 'f1a'!
-
-Check the results of the grafts tested
-
-  $ hg log -CGv --patch --git
-  @  changeset:   13:ef3adf6c20a4
-  |  tag:         tip
-  |  parent:      2:f58c7e2b28fa
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  files:       f1g
-  |  description:
-  |  G1
-  |
-  |
-  |  diff --git a/f1g b/f1g
-  |  new file mode 100644
-  |  --- /dev/null
-  |  +++ b/f1g
-  |  @@ -0,0 +1,1 @@
-  |  +c1g
-  |
-  | o  changeset:   12:b5542d755b54
-  |/   parent:      2:f58c7e2b28fa
-  |    user:        test
-  |    date:        Thu Jan 01 00:00:00 1970 +0000
-  |    files:       f1f
-  |    description:
-  |    F1
-  |
-  |
-  |    diff --git a/f1f b/f1f
-  |    new file mode 100644
-  |    --- /dev/null
-  |    +++ b/f1f
-  |    @@ -0,0 +1,1 @@
-  |    +c1a
-  |
-  | o  changeset:   11:f8a162271246
-  |/   parent:      2:f58c7e2b28fa
-  |    user:        test
-  |    date:        Thu Jan 01 00:00:00 1970 +0000
-  |    files:       f1e f2c f3e f4a f4e f5a f5b
-  |    copies:      f4e (f4a) f5b (f5a)
-  |    description:
-  |    E1
-  |
-  |
-  |    diff --git a/f1e b/f1e
-  |    new file mode 100644
-  |    --- /dev/null
-  |    +++ b/f1e
-  |    @@ -0,0 +1,1 @@
-  |    +c1a
-  |    diff --git a/f2c b/f2c
-  |    --- a/f2c
-  |    +++ b/f2c
-  |    @@ -1,1 +1,1 @@
-  |    -c2a
-  |    +c2e
-  |    diff --git a/f3e b/f3e
-  |    new file mode 100644
-  |    --- /dev/null
-  |    +++ b/f3e
-  |    @@ -0,0 +1,1 @@
-  |    +c3a
-  |    diff --git a/f4a b/f4e
-  |    rename from f4a
-  |    rename to f4e
-  |    diff --git a/f5a b/f5b
-  |    rename from f5a
-  |    rename to f5b
-  |
-  | o  changeset:   10:93ee502e8b0a
-  | |  user:        test
-  | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | |  files:       f3d f4e
-  | |  description:
-  | |  D2
-  | |
-  | |
-  | |  diff --git a/f3d b/f3d
-  | |  new file mode 100644
-  | |  --- /dev/null
-  | |  +++ b/f3d
-  | |  @@ -0,0 +1,1 @@
-  | |  +c3a
-  | |  diff --git a/f4e b/f4e
-  | |  --- a/f4e
-  | |  +++ b/f4e
-  | |  @@ -1,1 +1,1 @@
-  | |  -c4a
-  | |  +c4d
-  | |
-  | o  changeset:   9:539cf145f496
-  | |  parent:      6:6bd1736cab86
-  | |  user:        test
-  | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | |  files:       f1e f2a f2c f5a f5b
-  | |  copies:      f2c (f2a) f5a (f5b)
-  | |  description:
-  | |  C2
-  | |
-  | |
-  | |  diff --git a/f1e b/f1e
-  | |  --- a/f1e
-  | |  +++ b/f1e
-  | |  @@ -1,1 +1,1 @@
-  | |  -c1a
-  | |  +c1c
-  | |  diff --git a/f2a b/f2c
-  | |  rename from f2a
-  | |  rename to f2c
-  | |  diff --git a/f5b b/f5a
-  | |  rename from f5b
-  | |  rename to f5a
-  | |  --- a/f5b
-  | |  +++ b/f5a
-  | |  @@ -1,1 +1,1 @@
-  | |  -c5a
-  | |  +c5c
-  | |
-  | | o  changeset:   8:ba67f08fb15a
-  | | |  parent:      0:11f7a1b56675
-  | | |  user:        test
-  | | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | | |  files:       f1g
-  | | |  copies:      f1g (f1a)
-  | | |  description:
-  | | |  G0
-  | | |
-  | | |
-  | | |  diff --git a/f1a b/f1g
-  | | |  copy from f1a
-  | | |  copy to f1g
-  | | |  --- a/f1a
-  | | |  +++ b/f1g
-  | | |  @@ -1,1 +1,1 @@
-  | | |  -c1a
-  | | |  +c1g
-  | | |
-  | | | o  changeset:   7:d376ab0d7fda
-  | | |/   parent:      0:11f7a1b56675
-  | | |    user:        test
-  | | |    date:        Thu Jan 01 00:00:00 1970 +0000
-  | | |    files:       f1f
-  | | |    copies:      f1f (f1a)
-  | | |    description:
-  | | |    F0
-  | | |
-  | | |
-  | | |    diff --git a/f1a b/f1f
-  | | |    copy from f1a
-  | | |    copy to f1f
-  | | |
-  | o |  changeset:   6:6bd1736cab86
-  | |/   parent:      0:11f7a1b56675
-  | |    user:        test
-  | |    date:        Thu Jan 01 00:00:00 1970 +0000
-  | |    files:       f1a f1e f2a f3a f3e f4a f4e f5a f5b
-  | |    copies:      f1e (f1a) f3e (f3a) f4e (f4a) f5b (f5a)
-  | |    description:
-  | |    E0
-  | |
-  | |
-  | |    diff --git a/f1a b/f1e
-  | |    rename from f1a
-  | |    rename to f1e
-  | |    diff --git a/f2a b/f2a
-  | |    --- a/f2a
-  | |    +++ b/f2a
-  | |    @@ -1,1 +1,1 @@
-  | |    -c2a
-  | |    +c2e
-  | |    diff --git a/f3a b/f3e
-  | |    rename from f3a
-  | |    rename to f3e
-  | |    diff --git a/f4a b/f4e
-  | |    rename from f4a
-  | |    rename to f4e
-  | |    diff --git a/f5a b/f5b
-  | |    rename from f5a
-  | |    rename to f5b
-  | |
-  | | o  changeset:   5:560daee679da
-  | | |  user:        test
-  | | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | | |  files:       f3d f4a
-  | | |  description:
-  | | |  D1
-  | | |
-  | | |
-  | | |  diff --git a/f3d b/f3d
-  | | |  new file mode 100644
-  | | |  --- /dev/null
-  | | |  +++ b/f3d
-  | | |  @@ -0,0 +1,1 @@
-  | | |  +c3a
-  | | |  diff --git a/f4a b/f4a
-  | | |  --- a/f4a
-  | | |  +++ b/f4a
-  | | |  @@ -1,1 +1,1 @@
-  | | |  -c4a
-  | | |  +c4d
-  | | |
-  | | o  changeset:   4:c9763722f9bd
-  | |/   parent:      0:11f7a1b56675
-  | |    user:        test
-  | |    date:        Thu Jan 01 00:00:00 1970 +0000
-  | |    files:       f1a f2a f2c f5a
-  | |    copies:      f2c (f2a)
-  | |    description:
-  | |    C1
-  | |
-  | |
-  | |    diff --git a/f1a b/f1a
-  | |    --- a/f1a
-  | |    +++ b/f1a
-  | |    @@ -1,1 +1,1 @@
-  | |    -c1a
-  | |    +c1c
-  | |    diff --git a/f2a b/f2c
-  | |    rename from f2a
-  | |    rename to f2c
-  | |    diff --git a/f5a b/f5a
-  | |    --- a/f5a
-  | |    +++ b/f5a
-  | |    @@ -1,1 +1,1 @@
-  | |    -c5a
-  | |    +c5c
-  | |
-  +---o  changeset:   3:b69f5839d2d9
-  | |    user:        test
-  | |    date:        Thu Jan 01 00:00:00 1970 +0000
-  | |    files:       f3b f3d f4a
-  | |    copies:      f3d (f3b)
-  | |    description:
-  | |    D0
-  | |
-  | |
-  | |    diff --git a/f3b b/f3d
-  | |    rename from f3b
-  | |    rename to f3d
-  | |    diff --git a/f4a b/f4a
-  | |    --- a/f4a
-  | |    +++ b/f4a
-  | |    @@ -1,1 +1,1 @@
-  | |    -c4a
-  | |    +c4d
-  | |
-  o |  changeset:   2:f58c7e2b28fa
-  | |  user:        test
-  | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | |  files:       f1b f2a f2c f5a f5b
-  | |  copies:      f2c (f2a) f5a (f5b)
-  | |  description:
-  | |  C0
-  | |
-  | |
-  | |  diff --git a/f1b b/f1b
-  | |  --- a/f1b
-  | |  +++ b/f1b
-  | |  @@ -1,1 +1,1 @@
-  | |  -c1a
-  | |  +c1c
-  | |  diff --git a/f2a b/f2c
-  | |  rename from f2a
-  | |  rename to f2c
-  | |  diff --git a/f5b b/f5a
-  | |  rename from f5b
-  | |  rename to f5a
-  | |  --- a/f5b
-  | |  +++ b/f5a
-  | |  @@ -1,1 +1,1 @@
-  | |  -c5a
-  | |  +c5c
-  | |
-  o |  changeset:   1:3d7bba921b5d
-  |/   user:        test
-  |    date:        Thu Jan 01 00:00:00 1970 +0000
-  |    files:       f1a f1b f3a f3b f5a f5b
-  |    copies:      f1b (f1a) f3b (f3a) f5b (f5a)
-  |    description:
-  |    B0
-  |
-  |
-  |    diff --git a/f1a b/f1b
-  |    rename from f1a
-  |    rename to f1b
-  |    diff --git a/f3a b/f3b
-  |    rename from f3a
-  |    rename to f3b
-  |    diff --git a/f5a b/f5b
-  |    rename from f5a
-  |    rename to f5b
-  |
-  o  changeset:   0:11f7a1b56675
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     files:       f1a f2a f3a f4a f5a
-     description:
-     A0
-  
-  
-     diff --git a/f1a b/f1a
-     new file mode 100644
-     --- /dev/null
-     +++ b/f1a
-     @@ -0,0 +1,1 @@
-     +c1a
-     diff --git a/f2a b/f2a
-     new file mode 100644
-     --- /dev/null
-     +++ b/f2a
-     @@ -0,0 +1,1 @@
-     +c2a
-     diff --git a/f3a b/f3a
-     new file mode 100644
-     --- /dev/null
-     +++ b/f3a
-     @@ -0,0 +1,1 @@
-     +c3a
-     diff --git a/f4a b/f4a
-     new file mode 100644
-     --- /dev/null
-     +++ b/f4a
-     @@ -0,0 +1,1 @@
-     +c4a
-     diff --git a/f5a b/f5a
-     new file mode 100644
-     --- /dev/null
-     +++ b/f5a
-     @@ -0,0 +1,1 @@
-     +c5a
-  
-Check superfluous filemerge of files renamed in the past but untouched by graft
-
-  $ echo a > a
-  $ hg ci -qAma
-  $ hg mv a b
-  $ echo b > b
-  $ hg ci -qAmb
-  $ echo c > c
-  $ hg ci -qAmc
-  $ hg up -q .~2
-  $ hg graft tip -qt:fail
-
-  $ cd ..
-
-Graft a change into a new file previously grafted into a renamed directory
-
-  $ hg init dirmovenewfile
-  $ cd dirmovenewfile
-  $ mkdir a
-  $ echo a > a/a
-  $ hg ci -qAma
-  $ echo x > a/x
-  $ hg ci -qAmx
-  $ hg up -q 0
-  $ hg mv -q a b
-  $ hg ci -qAmb
-  $ hg graft -q 1 # a/x grafted as b/x, but no copy information recorded
-  $ hg up -q 1
-  $ echo y > a/x
-  $ hg ci -qAmy
-  $ hg up -q 3
-  $ hg graft -q 4
-  $ hg status --change .
-  M b/x
-
-Prepare for test of skipped changesets and how merges can influence it:
-
-  $ hg merge -q -r 1 --tool :local
-  $ hg ci -m m
-  $ echo xx >> b/x
-  $ hg ci -m xx
-
-  $ hg log -G -T '{rev} {desc|firstline}'
-  @  7 xx
-  |
-  o    6 m
-  |\
-  | o  5 y
-  | |
-  +---o  4 y
-  | |
-  | o  3 x
-  | |
-  | o  2 b
-  | |
-  o |  1 x
-  |/
-  o  0 a
-  
-Grafting of plain changes correctly detects that 3 and 5 should be skipped:
-
-  $ hg up -qCr 4
-  $ hg graft --tool :local -r 2::5
-  skipping already grafted revision 3:ca093ca2f1d9 (was grafted from 1:13ec5badbf2a)
-  skipping already grafted revision 5:43e9eb70dab0 (was grafted from 4:6c9a1289e5f1)
-  grafting 2:42127f193bcd "b"
-
-Extending the graft range to include a (skipped) merge of 3 will not prevent us from
-also detecting that both 3 and 5 should be skipped:
-
-  $ hg up -qCr 4
-  $ hg graft --tool :local -r 2::7
-  skipping ungraftable merge revision 6
-  skipping already grafted revision 3:ca093ca2f1d9 (was grafted from 1:13ec5badbf2a)
-  skipping already grafted revision 5:43e9eb70dab0 (was grafted from 4:6c9a1289e5f1)
-  grafting 2:42127f193bcd "b"
-  grafting 7:d3c3f2b38ecc "xx"
-  note: graft of 7:d3c3f2b38ecc created no changes to commit
-
-  $ cd ..
-
-A grafted revision should be warned about and skipped only once. (issue6024)
-
-  $ mkdir issue6024
-  $ cd issue6024
-
-  $ hg init base
-  $ cd base
-  $ touch x
-  $ hg commit -qAminit
-  $ echo a > x
-  $ hg commit -mchange
-  $ hg update -q 0
-  $ hg graft -r 1
-  grafting 1:a0b923c546aa "change" (tip)
-  $ cd ..
-
-  $ hg clone -qr 2 base clone
-  $ cd clone
-  $ hg pull -q
-  $ hg merge -q 2
-  $ hg commit -mmerge
-  $ hg update -q 0
-  $ hg graft -r 1
-  grafting 1:04fc6d444368 "change"
-  $ hg update -q 3
-  $ hg log -G -T '{rev}:{node|shortest} <- {extras.source|shortest}\n'
-  o  4:4e16 <- a0b9
-  |
-  | @    3:f0ac <-
-  | |\
-  +---o  2:a0b9 <-
-  | |
-  | o  1:04fc <- a0b9
-  |/
-  o  0:7848 <-
-  
-
- The source of rev 4 is an ancestor of the working parent, and was also
- grafted as rev 1. It should be stripped from the target revisions only once.
-
-  $ hg graft -r 4
-  skipping already grafted revision 4:4e16bab40c9c (1:04fc6d444368 also has origin 2:a0b923c546aa)
-  [255]
-
-  $ cd ../..
-
-Testing the reading of an old-format graftstate file with newer Mercurial
-
-  $ hg init oldgraft
-  $ cd oldgraft
-  $ for ch in a b c; do echo foo > $ch; hg add $ch; hg ci -Aqm "added "$ch; done;
-  $ hg log -GT "{rev}:{node|short} {desc}\n"
-  @  2:8be98ac1a569 added c
-  |
-  o  1:80e6d2c47cfe added b
-  |
-  o  0:f7ad41964313 added a
-  
-  $ hg up 0
-  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
-  $ echo bar > b
-  $ hg add b
-  $ hg ci -m "bar to b"
-  created new head
-  $ hg graft -r 1 -r 2
-  grafting 1:80e6d2c47cfe "added b"
-  merging b
-  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-Writing the nodes in old format to graftstate
-
-  $ hg log -r 1 -r 2 -T '{node}\n' > .hg/graftstate
-  $ echo foo > b
-  $ hg resolve -m
-  (no more unresolved files)
-  continue: hg graft --continue
-  $ hg graft --continue
-  grafting 1:80e6d2c47cfe "added b"
-  grafting 2:8be98ac1a569 "added c"
-
-Testing that --user is preserved during conflicts and its value is reused while
-running `hg graft --continue`
-
-  $ hg log -G
-  @  changeset:   5:711e9fa999f1
-  |  tag:         tip
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     added c
-  |
-  o  changeset:   4:e5ad7353b408
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     added b
-  |
-  o  changeset:   3:9e887f7a939c
-  |  parent:      0:f7ad41964313
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     bar to b
-  |
-  | o  changeset:   2:8be98ac1a569
-  | |  user:        test
-  | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | |  summary:     added c
-  | |
-  | o  changeset:   1:80e6d2c47cfe
-  |/   user:        test
-  |    date:        Thu Jan 01 00:00:00 1970 +0000
-  |    summary:     added b
-  |
-  o  changeset:   0:f7ad41964313
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     added a
-  
-
-  $ hg up '.^^'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-
-  $ hg graft -r 1 -r 2 --user batman
-  grafting 1:80e6d2c47cfe "added b"
-  merging b
-  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ echo wat > b
-  $ hg resolve -m
-  (no more unresolved files)
-  continue: hg graft --continue
-
-  $ hg graft --continue
-  grafting 1:80e6d2c47cfe "added b"
-  grafting 2:8be98ac1a569 "added c"
-
-  $ hg log -Gr 3::
-  @  changeset:   7:11a36ffaacf2
-  |  tag:         tip
-  |  user:        batman
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     added c
-  |
-  o  changeset:   6:76803afc6511
-  |  parent:      3:9e887f7a939c
-  |  user:        batman
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     added b
-  |
-  | o  changeset:   5:711e9fa999f1
-  | |  user:        test
-  | |  date:        Thu Jan 01 00:00:00 1970 +0000
-  | |  summary:     added c
-  | |
-  | o  changeset:   4:e5ad7353b408
-  |/   user:        test
-  |    date:        Thu Jan 01 00:00:00 1970 +0000
-  |    summary:     added b
-  |
-  o  changeset:   3:9e887f7a939c
-  |  parent:      0:f7ad41964313
-  ~  user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     bar to b
-  
-Test that --date is preserved and reused in `hg graft --continue`
-
-  $ hg up '.^^'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ hg graft -r 1 -r 2 --date '1234560000 120'
-  grafting 1:80e6d2c47cfe "added b"
-  merging b
-  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ echo foobar > b
-  $ hg resolve -m
-  (no more unresolved files)
-  continue: hg graft --continue
-  $ hg graft --continue
-  grafting 1:80e6d2c47cfe "added b"
-  grafting 2:8be98ac1a569 "added c"
-
-  $ hg log -Gr '.^^::.'
-  @  changeset:   9:1896b76e007a
-  |  tag:         tip
-  |  user:        test
-  |  date:        Fri Feb 13 21:18:00 2009 -0002
-  |  summary:     added c
-  |
-  o  changeset:   8:ce2b4f1632af
-  |  parent:      3:9e887f7a939c
-  |  user:        test
-  |  date:        Fri Feb 13 21:18:00 2009 -0002
-  |  summary:     added b
-  |
-  o  changeset:   3:9e887f7a939c
-  |  parent:      0:f7ad41964313
-  ~  user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     bar to b
-  
-Test that --log is preserved and reused in `hg graft --continue`
-
-  $ hg up '.^^'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ hg graft -r 1 -r 2 --log
-  grafting 1:80e6d2c47cfe "added b"
-  merging b
-  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ echo foobar > b
-  $ hg resolve -m
-  (no more unresolved files)
-  continue: hg graft --continue
-
-  $ hg graft --continue
-  grafting 1:80e6d2c47cfe "added b"
-  grafting 2:8be98ac1a569 "added c"
-
-  $ hg log -GT "{rev}:{node|short} {desc}" -r '.^^::.'
-  @  11:30c1050a58b2 added c
-  |  (grafted from 8be98ac1a56990c2d9ca6861041b8390af7bd6f3)
-  o  10:ec7eda2313e2 added b
-  |  (grafted from 80e6d2c47cfe5b3185519568327a17a061c7efb6)
-  o  3:9e887f7a939c bar to b
-  |
-  ~
-
-  $ cd ..
-
-Testing the --stop flag of `hg graft` which stops the interrupted graft
-
-  $ hg init stopgraft
-  $ cd stopgraft
-  $ for ch in a b c d; do echo $ch > $ch; hg add $ch; hg ci -Aqm "added "$ch; done;
-
-  $ hg log -G
-  @  changeset:   3:9150fe93bec6
-  |  tag:         tip
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     added d
-  |
-  o  changeset:   2:155349b645be
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     added c
-  |
-  o  changeset:   1:5f6d8a4bf34a
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     added b
-  |
-  o  changeset:   0:9092f1db7931
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     added a
-  
-  $ hg up '.^^'
-  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
-
-  $ echo foo > d
-  $ hg ci -Aqm "added foo to d"
-
-  $ hg graft --stop
-  abort: no interrupted graft found
-  [255]
-
-  $ hg graft -r 3
-  grafting 3:9150fe93bec6 "added d"
-  merging d
-  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ hg graft --stop --continue
-  abort: cannot use '--continue' and '--stop' together
-  [255]
-
-  $ hg graft --stop -U
-  abort: cannot specify any other flag with '--stop'
-  [255]
-  $ hg graft --stop --rev 4
-  abort: cannot specify any other flag with '--stop'
-  [255]
-  $ hg graft --stop --log
-  abort: cannot specify any other flag with '--stop'
-  [255]
-
-  $ hg graft --stop
-  stopped the interrupted graft
-  working directory is now at a0deacecd59d
-
-  $ hg diff
-
-  $ hg log -Gr '.'
-  @  changeset:   4:a0deacecd59d
-  |  tag:         tip
-  ~  parent:      1:5f6d8a4bf34a
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     added foo to d
-  
-  $ hg graft -r 2 -r 3
-  grafting 2:155349b645be "added c"
-  grafting 3:9150fe93bec6 "added d"
-  merging d
-  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ hg graft --stop
-  stopped the interrupted graft
-  working directory is now at 75b447541a9e
-
-  $ hg diff
-
-  $ hg log -G -T "{rev}:{node|short} {desc}"
-  @  5:75b447541a9e added c
-  |
-  o  4:a0deacecd59d added foo to d
-  |
-  | o  3:9150fe93bec6 added d
-  | |
-  | o  2:155349b645be added c
-  |/
-  o  1:5f6d8a4bf34a added b
-  |
-  o  0:9092f1db7931 added a
-  
-  $ cd ..
-
-Testing the --abort flag for `hg graft`, which aborts and rolls back to the
-state before the graft
-
-  $ hg init abortgraft
-  $ cd abortgraft
-  $ for ch in a b c d; do echo $ch > $ch; hg add $ch; hg ci -Aqm "added "$ch; done;
-
-  $ hg up '.^^'
-  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
-
-  $ echo x > x
-  $ hg ci -Aqm "added x"
-  $ hg up '.^'
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ echo foo > c
-  $ hg ci -Aqm "added foo to c"
-
-  $ hg log -GT "{rev}:{node|short} {desc}"
-  @  5:36b793615f78 added foo to c
-  |
-  | o  4:863a25e1a9ea added x
-  |/
-  | o  3:9150fe93bec6 added d
-  | |
-  | o  2:155349b645be added c
-  |/
-  o  1:5f6d8a4bf34a added b
-  |
-  o  0:9092f1db7931 added a
-  
-  $ hg up 9150fe93bec6
-  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-  $ hg abort
-  abort: no interrupted graft to abort (abortflag !)
-  abort: no operation in progress (abortcommand !)
-  [255]
-
-when stripping is required
-  $ hg graft -r 4 -r 5
-  grafting 4:863a25e1a9ea "added x"
-  grafting 5:36b793615f78 "added foo to c" (tip)
-  merging c
-  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ hg graft --continue --abort
-  abort: cannot use '--continue' and '--abort' together
-  [255]
-
-  $ hg graft --abort --stop
-  abort: cannot use '--abort' and '--stop' together
-  [255]
-
-  $ hg graft --abort --currentuser
-  abort: cannot specify any other flag with '--abort'
-  [255]
-
-  $ hg graft --abort --edit
-  abort: cannot specify any other flag with '--abort'
-  [255]
-
-#if abortcommand
-when in dry-run mode
-  $ hg abort --dry-run
-  graft in progress, will be aborted
-#endif
-
-  $ hg abort
-  graft aborted
-  working directory is now at 9150fe93bec6
-  $ hg log -GT "{rev}:{node|short} {desc}"
-  o  5:36b793615f78 added foo to c
-  |
-  | o  4:863a25e1a9ea added x
-  |/
-  | @  3:9150fe93bec6 added d
-  | |
-  | o  2:155349b645be added c
-  |/
-  o  1:5f6d8a4bf34a added b
-  |
-  o  0:9092f1db7931 added a
-  
-when stripping is not required
-  $ hg graft -r 5
-  grafting 5:36b793615f78 "added foo to c" (tip)
-  merging c
-  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ hg abort
-  graft aborted
-  working directory is now at 9150fe93bec6
-  $ hg log -GT "{rev}:{node|short} {desc}"
-  o  5:36b793615f78 added foo to c
-  |
-  | o  4:863a25e1a9ea added x
-  |/
-  | @  3:9150fe93bec6 added d
-  | |
-  | o  2:155349b645be added c
-  |/
-  o  1:5f6d8a4bf34a added b
-  |
-  o  0:9092f1db7931 added a
-  
-when some of the changesets became public
-
-  $ hg graft -r 4 -r 5
-  grafting 4:863a25e1a9ea "added x"
-  grafting 5:36b793615f78 "added foo to c" (tip)
-  merging c
-  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ hg log -GT "{rev}:{node|short} {desc}"
-  @  6:6ec71c037d94 added x
-  |
-  | o  5:36b793615f78 added foo to c
-  | |
-  | | o  4:863a25e1a9ea added x
-  | |/
-  o |  3:9150fe93bec6 added d
-  | |
-  o |  2:155349b645be added c
-  |/
-  o  1:5f6d8a4bf34a added b
-  |
-  o  0:9092f1db7931 added a
-  
-  $ hg phase -r 6 --public
-
-  $ hg abort
-  cannot clean up public changesets 6ec71c037d94
-  graft aborted
-  working directory is now at 6ec71c037d94
-
-when we created new changesets on top of an existing one
-
-  $ hg up '.^^'
-  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
-  $ echo y > y
-  $ hg ci -Aqm "added y"
-  $ echo z > z
-  $ hg ci -Aqm "added z"
-
-  $ hg up 3
-  1 files updated, 0 files merged, 3 files removed, 0 files unresolved
-  $ hg log -GT "{rev}:{node|short} {desc}"
-  o  8:637f9e9bbfd4 added z
-  |
-  o  7:123221671fd4 added y
-  |
-  | o  6:6ec71c037d94 added x
-  | |
-  | | o  5:36b793615f78 added foo to c
-  | | |
-  | | | o  4:863a25e1a9ea added x
-  | | |/
-  | @ |  3:9150fe93bec6 added d
-  |/ /
-  o /  2:155349b645be added c
-  |/
-  o  1:5f6d8a4bf34a added b
-  |
-  o  0:9092f1db7931 added a
-  
-  $ hg graft -r 8 -r 7 -r 5
-  grafting 8:637f9e9bbfd4 "added z" (tip)
-  grafting 7:123221671fd4 "added y"
-  grafting 5:36b793615f78 "added foo to c"
-  merging c
-  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ cd ..
-  $ hg init pullrepo
-  $ cd pullrepo
-  $ cat >> .hg/hgrc <<EOF
-  > [phases]
-  > publish=False
-  > EOF
-  $ hg pull ../abortgraft --config phases.publish=False
-  pulling from ../abortgraft
-  requesting all changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 11 changesets with 9 changes to 8 files (+4 heads)
-  new changesets 9092f1db7931:6b98ff0062dd (6 drafts)
-  (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg up 9
-  5 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ echo w > w
-  $ hg ci -Aqm "added w" --config phases.publish=False
-
-  $ cd ../abortgraft
-  $ hg pull ../pullrepo
-  pulling from ../pullrepo
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets 311dfc6cf3bf (1 drafts)
-  (run 'hg heads .' to see heads, 'hg merge' to merge)
-
-  $ hg abort
-  new changesets detected on destination branch, can't strip
-  graft aborted
-  working directory is now at 6b98ff0062dd
-
-  $ cd ..
-
-===========================
-Testing --no-commit option:
-===========================
-
-  $ hg init nocommit
-  $ cd nocommit
-  $ echo a > a
-  $ hg ci -qAma
-  $ echo b > b
-  $ hg ci -qAmb
-  $ hg up -q 0
-  $ echo c > c
-  $ hg ci -qAmc
-  $ hg log -GT "{rev}:{node|short} {desc}\n"
-  @  2:d36c0562f908 c
-  |
-  | o  1:d2ae7f538514 b
-  |/
-  o  0:cb9a9f314b8b a
-  
-
-Check reporting when --no-commit is used with non-applicable options:
-
-  $ hg graft 1 --no-commit -e
-  abort: cannot specify --no-commit and --edit together
-  [255]
-
-  $ hg graft 1 --no-commit --log
-  abort: cannot specify --no-commit and --log together
-  [255]
-
-  $ hg graft 1 --no-commit -D
-  abort: cannot specify --no-commit and --currentdate together
-  [255]
-
-Test --no-commit is working:
-  $ hg graft 1 --no-commit
-  grafting 1:d2ae7f538514 "b"
-
-  $ hg log -GT "{rev}:{node|short} {desc}\n"
-  @  2:d36c0562f908 c
-  |
-  | o  1:d2ae7f538514 b
-  |/
-  o  0:cb9a9f314b8b a
-  
-
-  $ hg diff
-  diff -r d36c0562f908 b
-  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/b	Thu Jan 01 00:00:00 1970 +0000
-  @@ -0,0 +1,1 @@
-  +b
-
-Prepare the working directory to check --no-commit is respected after --continue:
-
-  $ hg up -qC
-  $ echo A>a
-  $ hg ci -qm "A in file a"
-  $ hg up -q 1
-  $ echo B>a
-  $ hg ci -qm "B in file a"
-  $ hg log -GT "{rev}:{node|short} {desc}\n"
-  @  4:2aa9ad1006ff B in file a
-  |
-  | o  3:09e253b87e17 A in file a
-  | |
-  | o  2:d36c0562f908 c
-  | |
-  o |  1:d2ae7f538514 b
-  |/
-  o  0:cb9a9f314b8b a
-  
-
-  $ hg graft 3 --no-commit
-  grafting 3:09e253b87e17 "A in file a"
-  merging a
-  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-Resolve conflict:
-  $ echo A>a
-  $ hg resolve --mark
-  (no more unresolved files)
-  continue: hg graft --continue
-
-  $ hg graft --continue
-  grafting 3:09e253b87e17 "A in file a"
-  $ hg log -GT "{rev}:{node|short} {desc}\n"
-  @  4:2aa9ad1006ff B in file a
-  |
-  | o  3:09e253b87e17 A in file a
-  | |
-  | o  2:d36c0562f908 c
-  | |
-  o |  1:d2ae7f538514 b
-  |/
-  o  0:cb9a9f314b8b a
-  
-  $ hg diff
-  diff -r 2aa9ad1006ff a
-  --- a/a	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
-  @@ -1,1 +1,1 @@
-  -B
-  +A
-
-  $ hg up -qC
-
-Check --no-commit is respected when passed with --continue:
-
-  $ hg graft 3
-  grafting 3:09e253b87e17 "A in file a"
-  merging a
-  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-Resolve conflict:
-  $ echo A>a
-  $ hg resolve --mark
-  (no more unresolved files)
-  continue: hg graft --continue
-
-  $ hg graft --continue --no-commit
-  grafting 3:09e253b87e17 "A in file a"
-  $ hg diff
-  diff -r 2aa9ad1006ff a
-  --- a/a	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
-  @@ -1,1 +1,1 @@
-  -B
-  +A
-
-  $ hg log -GT "{rev}:{node|short} {desc}\n"
-  @  4:2aa9ad1006ff B in file a
-  |
-  | o  3:09e253b87e17 A in file a
-  | |
-  | o  2:d36c0562f908 c
-  | |
-  o |  1:d2ae7f538514 b
-  |/
-  o  0:cb9a9f314b8b a
-  
-  $ hg up -qC
-
-Test --no-commit when grafting multiple revisions:
-When there is a conflict:
-  $ hg graft -r "2::3" --no-commit
-  grafting 2:d36c0562f908 "c"
-  grafting 3:09e253b87e17 "A in file a"
-  merging a
-  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
-  abort: unresolved conflicts, can't continue
-  (use 'hg resolve' and 'hg graft --continue')
-  [255]
-
-  $ echo A>a
-  $ hg resolve --mark
-  (no more unresolved files)
-  continue: hg graft --continue
-  $ hg graft --continue
-  grafting 3:09e253b87e17 "A in file a"
-  $ hg diff
-  diff -r 2aa9ad1006ff a
-  --- a/a	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
-  @@ -1,1 +1,1 @@
-  -B
-  +A
-  diff -r 2aa9ad1006ff c
-  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
-  @@ -0,0 +1,1 @@
-  +c
-
-  $ hg log -GT "{rev}:{node|short} {desc}\n"
-  @  4:2aa9ad1006ff B in file a
-  |
-  | o  3:09e253b87e17 A in file a
-  | |
-  | o  2:d36c0562f908 c
-  | |
-  o |  1:d2ae7f538514 b
-  |/
-  o  0:cb9a9f314b8b a
-  
-  $ hg up -qC
-
-When there is no conflict:
-  $ echo d>d
-  $ hg add d -q
-  $ hg ci -qmd
-  $ hg up 3 -q
-  $ hg log -GT "{rev}:{node|short} {desc}\n"
-  o  5:baefa8927fc0 d
-  |
-  o  4:2aa9ad1006ff B in file a
-  |
-  | @  3:09e253b87e17 A in file a
-  | |
-  | o  2:d36c0562f908 c
-  | |
-  o |  1:d2ae7f538514 b
-  |/
-  o  0:cb9a9f314b8b a
-  
-
-  $ hg graft -r 1 -r 5 --no-commit
-  grafting 1:d2ae7f538514 "b"
-  grafting 5:baefa8927fc0 "d" (tip)
-  $ hg diff
-  diff -r 09e253b87e17 b
-  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/b	Thu Jan 01 00:00:00 1970 +0000
-  @@ -0,0 +1,1 @@
-  +b
-  diff -r 09e253b87e17 d
-  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/d	Thu Jan 01 00:00:00 1970 +0000
-  @@ -0,0 +1,1 @@
-  +d
-  $ hg log -GT "{rev}:{node|short} {desc}\n"
-  o  5:baefa8927fc0 d
-  |
-  o  4:2aa9ad1006ff B in file a
-  |
-  | @  3:09e253b87e17 A in file a
-  | |
-  | o  2:d36c0562f908 c
-  | |
-  o |  1:d2ae7f538514 b
-  |/
-  o  0:cb9a9f314b8b a
-  
-  $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hashutil.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,80 @@
+# Tests to ensure that sha1dc.sha1 is exactly a drop-in for
+# hashlib.sha1 for our needs.
+from __future__ import absolute_import
+
+import hashlib
+import unittest
+
+import silenttestrunner
+
+try:
+    from mercurial.thirdparty import sha1dc
+except ImportError:
+    sha1dc = None
+
+
+class hashertestsbase(object):
+    def test_basic_hash(self):
+        h = self.hasher()
+        h.update(b'foo')
+        self.assertEqual(
+            '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33', h.hexdigest()
+        )
+        h.update(b'bar')
+        self.assertEqual(
+            '8843d7f92416211de9ebb963ff4ce28125932878', h.hexdigest()
+        )
+
+    def test_copy_hasher(self):
+        h = self.hasher()
+        h.update(b'foo')
+        h2 = h.copy()
+        h.update(b'baz')
+        h2.update(b'bar')
+        self.assertEqual(
+            '21eb6533733a5e4763acacd1d45a60c2e0e404e1', h.hexdigest()
+        )
+        self.assertEqual(
+            '8843d7f92416211de9ebb963ff4ce28125932878', h2.hexdigest()
+        )
+
+    def test_init_hasher(self):
+        h = self.hasher(b'initial string')
+        self.assertEqual(
+            b'\xc9y|n\x1f3S\xa4:\xbaJ\xca,\xc1\x1a\x9e\xb8\xd8\xdd\x86',
+            h.digest(),
+        )
+
+    def test_bytes_like_types(self):
+        h = self.hasher()
+        h.update(bytearray(b'foo'))
+        h.update(memoryview(b'baz'))
+        self.assertEqual(
+            '21eb6533733a5e4763acacd1d45a60c2e0e404e1', h.hexdigest()
+        )
+
+        h = self.hasher(bytearray(b'foo'))
+        h.update(b'baz')
+        self.assertEqual(
+            '21eb6533733a5e4763acacd1d45a60c2e0e404e1', h.hexdigest()
+        )
+
+        h = self.hasher(memoryview(b'foo'))
+        h.update(b'baz')
+        self.assertEqual(
+            '21eb6533733a5e4763acacd1d45a60c2e0e404e1', h.hexdigest()
+        )
+
+
+class hashlibtests(unittest.TestCase, hashertestsbase):
+    hasher = hashlib.sha1
+
+
+if sha1dc:
+
+    class sha1dctests(unittest.TestCase, hashertestsbase):
+        hasher = sha1dc.sha1
+
+
+if __name__ == '__main__':
+    silenttestrunner.main(__name__)
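
The mixin above runs identical assertions against hashlib.sha1 and, when the
module is built, sha1dc.sha1. A minimal sketch of the fallback-import pattern
that this drop-in guarantee supports (the `sha1` alias is illustrative, not
Mercurial's actual selection code):

    # prefer the collision-detecting hasher when the optional module is
    # built, falling back to the stdlib one (sketch; the alias is illustrative)
    import hashlib

    try:
        from mercurial.thirdparty import sha1dc
        sha1 = sha1dc.sha1
    except ImportError:
        sha1 = hashlib.sha1

    # either implementation yields the same digest for ordinary input
    # (expected value taken from test_basic_hash above)
    assert sha1(b'foo').hexdigest() == '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
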
--- a/tests/test-hgrc.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-hgrc.t	Tue Jan 21 13:14:51 2020 -0500
@@ -197,9 +197,6 @@
 with environment variables
 
   $ PAGER=p1 EDITOR=e1 VISUAL=e2 hg showconfig --debug
-  set config by: $EDITOR
-  set config by: $VISUAL
-  set config by: $PAGER
   read config from: $TESTTMP/hgrc
   repo: bundle.mainreporoot=$TESTTMP
   $PAGER: pager.pager=p1
@@ -261,3 +258,16 @@
   plain: True
   read config from: $TESTTMP/hgrc
   $TESTTMP/hgrc:17: paths.foo=$TESTTMP/bar
+
+Test we can skip the repository configuration
+
+  $ cat >> .hg/hgrc <<EOF
+  > [paths]
+  > elephant = babar
+  > EOF
+  $ hg path
+  elephant = $TESTTMP/babar
+  foo = $TESTTMP/bar
+  $ HGRCSKIPREPO=1 hg path
+  foo = $TESTTMP/bar
+
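
The new test relies on HGRCSKIPREPO suppressing the repository's .hg/hgrc while
other configuration sources still apply. A hypothetical sketch of such a gate
(the helper and its arguments are illustrative, not Mercurial's config-loading
internals):

    import os

    def config_files(global_rcs, repo_rc):
        # hypothetical: collect the config files to read, skipping the
        # per-repo hgrc whenever HGRCSKIPREPO is set in the environment
        files = list(global_rcs)
        if 'HGRCSKIPREPO' not in os.environ:
            files.append(repo_rc)
        return files
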
--- a/tests/test-hgweb-json.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-hgweb-json.t	Tue Jan 21 13:14:51 2020 -0500
@@ -782,6 +782,13 @@
       0
     ],
     "desc": "merge test-branch into default",
+    "diff": [],
+    "files": [
+      {
+        "file": "foo-new",
+        "status": "modified"
+      }
+    ],
     "node": "cc725e08502a79dd1eda913760fbe06ed7a9abc7",
     "parents": [
       "ceed296fe500c3fac9541e31dad860cb49c89e45",
@@ -807,6 +814,68 @@
       0
     ],
     "desc": "move foo",
+    "diff": [
+      {
+        "blockno": 1,
+        "lines": [
+          {
+            "l": "--- a/foo\tThu Jan 01 00:00:00 1970 +0000\n",
+            "n": 1,
+            "t": "-"
+          },
+          {
+            "l": "+++ /dev/null\tThu Jan 01 00:00:00 1970 +0000\n",
+            "n": 2,
+            "t": "+"
+          },
+          {
+            "l": "@@ -1,1 +0,0 @@\n",
+            "n": 3,
+            "t": "@"
+          },
+          {
+            "l": "-bar\n",
+            "n": 4,
+            "t": "-"
+          }
+        ]
+      },
+      {
+        "blockno": 2,
+        "lines": [
+          {
+            "l": "--- /dev/null\tThu Jan 01 00:00:00 1970 +0000\n",
+            "n": 1,
+            "t": "-"
+          },
+          {
+            "l": "+++ b/foo-new\tThu Jan 01 00:00:00 1970 +0000\n",
+            "n": 2,
+            "t": "+"
+          },
+          {
+            "l": "@@ -0,0 +1,1 @@\n",
+            "n": 3,
+            "t": "@"
+          },
+          {
+            "l": "+bar\n",
+            "n": 4,
+            "t": "+"
+          }
+        ]
+      }
+    ],
+    "files": [
+      {
+        "file": "foo",
+        "status": "removed"
+      },
+      {
+        "file": "foo-new",
+        "status": "added"
+      }
+    ],
     "node": "78896eb0e102174ce9278438a95e12543e4367a7",
     "parents": [
       "8d7c456572acf3557e8ed8a07286b10c408bcec5"
@@ -833,6 +902,44 @@
       0
     ],
     "desc": "modify da/foo",
+    "diff": [
+      {
+        "blockno": 1,
+        "lines": [
+          {
+            "l": "--- a/da/foo\tThu Jan 01 00:00:00 1970 +0000\n",
+            "n": 1,
+            "t": "-"
+          },
+          {
+            "l": "+++ b/da/foo\tThu Jan 01 00:00:00 1970 +0000\n",
+            "n": 2,
+            "t": "+"
+          },
+          {
+            "l": "@@ -1,1 +1,1 @@\n",
+            "n": 3,
+            "t": "@"
+          },
+          {
+            "l": "-foo\n",
+            "n": 4,
+            "t": "-"
+          },
+          {
+            "l": "+bar\n",
+            "n": 5,
+            "t": "+"
+          }
+        ]
+      }
+    ],
+    "files": [
+      {
+        "file": "da/foo",
+        "status": "modified"
+      }
+    ],
     "node": "8d7c456572acf3557e8ed8a07286b10c408bcec5",
     "parents": [
       "f8bbb9024b10f93cdbb8d940337398291d40dea8"
@@ -855,6 +962,44 @@
       0
     ],
     "desc": "create test branch",
+    "diff": [
+      {
+        "blockno": 1,
+        "lines": [
+          {
+            "l": "--- a/foo\tThu Jan 01 00:00:00 1970 +0000\n",
+            "n": 1,
+            "t": "-"
+          },
+          {
+            "l": "+++ b/foo\tThu Jan 01 00:00:00 1970 +0000\n",
+            "n": 2,
+            "t": "+"
+          },
+          {
+            "l": "@@ -1,1 +1,1 @@\n",
+            "n": 3,
+            "t": "@"
+          },
+          {
+            "l": "-foo\n",
+            "n": 4,
+            "t": "-"
+          },
+          {
+            "l": "+branch\n",
+            "n": 5,
+            "t": "+"
+          }
+        ]
+      }
+    ],
+    "files": [
+      {
+        "file": "foo",
+        "status": "modified"
+      }
+    ],
     "node": "6ab967a8ab3489227a83f80e920faa039a71819f",
     "parents": [
       "06e557f3edf66faa1ccaba5dd8c203c21cc79f1e"
--- a/tests/test-highlight.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-highlight.t	Tue Jan 21 13:14:51 2020 -0500
@@ -184,7 +184,8 @@
   <span id="l28">    <span class="kn">except</span> <span class="p">(</span><span class="ne">ValueError</span><span class="p">,</span> <span class="ne">IndexError</span><span class="p">):</span></span><a href="#l28"></a>
   <span id="l29">        <span class="n">n</span> <span class="o">=</span> <span class="mi">10</span></span><a href="#l29"></a>
   <span id="l30">    <span class="n">p</span> <span class="o">=</span> <span class="n">primes</span><span class="p">()</span></span><a href="#l30"></a>
-  <span id="l31">    <span class="kn">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></span><a href="#l31"></a>
+  <span id="l31">    <span class="nb">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></span><a href="#l31"></a> (pygments25 !)
+  <span id="l31">    <span class="kn">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></span><a href="#l31"></a> (no-pygments25 !)
   <span id="l32"></span><a href="#l32"></a>
   </pre>
   </div>
@@ -845,7 +846,8 @@
   <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l31">    31</a>     <span class="kn">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></td>
+  <td class="source followlines-btn-parent"><a href="#l31">    31</a>     <span class="nb">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></td> (pygments25 !)
+  <td class="source followlines-btn-parent"><a href="#l31">    31</a>     <span class="kn">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></td> (no-pygments25 !)
   </tr>
   <tr id="l32" class="thisrev">
   <td class="annotate parity0">
--- a/tests/test-histedit-obsolete.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-histedit-obsolete.t	Tue Jan 21 13:14:51 2020 -0500
@@ -307,7 +307,7 @@
   o  0:cb9a9f314b8b (public) a
   
   $ hg histedit -r '.~2'
-  abort: cannot edit public changeset: cb9a9f314b8b
+  abort: cannot edit public changesets
   (see 'hg help phases' for details)
   [255]
 
--- a/tests/test-hook.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-hook.t	Tue Jan 21 13:14:51 2020 -0500
@@ -988,6 +988,7 @@
   ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
   Traceback (most recent call last): (py3 !)
   HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (no-py3 !)
+      raise error.HookLoadError( (py38 !)
   mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (py3 !)
   abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
 
@@ -1161,6 +1162,7 @@
   ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
   Traceback (most recent call last):
   HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (no-py3 !)
+      raise error.HookLoadError( (py38 !)
   mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (py3 !)
   abort: precommit.importfail hook is invalid: import of "importfail" failed
 
--- a/tests/test-import.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-import.t	Tue Jan 21 13:14:51 2020 -0500
@@ -435,6 +435,49 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     second change
   
+  $ hg --cwd b phase tip
+  1: draft
+  $ rm -r b
+
+
+hg import --secret
+
+  $ hg clone -r0 a b -q
+  $ hg --cwd b import --no-commit --secret ../exported-tip.patch
+  abort: cannot use --no-commit with --secret
+  [255]
+  $ hg --cwd b import --secret ../exported-tip.patch
+  applying ../exported-tip.patch
+  $ hg --cwd b diff -c . --nodates
+  diff -r 80971e65b431 -r 1d4bd90af0e4 a
+  --- a/a
+  +++ b/a
+  @@ -1,1 +1,2 @@
+   line 1
+  +line 2
+  $ hg --cwd b phase
+  1: secret
+  $ hg --cwd b --config extensions.strip= strip 1 --no-backup --quiet
+  $ HGEDITOR=cat hg --cwd b import --secret --edit ../exported-tip.patch
+  applying ../exported-tip.patch
+  second change
+  
+  
+  HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  HG: Leave message empty to abort commit.
+  HG: --
+  HG: user: someone
+  HG: branch 'default'
+  HG: changed a
+  $ hg --cwd b diff -c . --nodates
+  diff -r 80971e65b431 -r 1d4bd90af0e4 a
+  --- a/a
+  +++ b/a
+  @@ -1,1 +1,2 @@
+   line 1
+  +line 2
+  $ hg --cwd b phase
+  1: secret
   $ rm -r b
 
 
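
The first command above shows --no-commit and --secret being rejected together,
presumably because --secret sets the phase of the very commit that --no-commit
suppresses. A minimal sketch of that kind of mutual-exclusion check (names are
illustrative, not Mercurial's option handling):

    class Abort(Exception):
        """illustrative stand-in for Mercurial's abort error"""

    def check_import_opts(no_commit, secret):
        # the two flags contradict each other: --secret needs a commit to
        # attach a phase to, while --no-commit suppresses the commit
        if no_commit and secret:
            raise Abort("cannot use --no-commit with --secret")
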
--- a/tests/test-imports-checker.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-imports-checker.t	Tue Jan 21 13:14:51 2020 -0500
@@ -47,6 +47,11 @@
   > from .. import os
   > EOF
 
+  $ cat > testpackage/stdlibfrom.py << EOF
+  > from __future__ import absolute_import
+  > from collections import abc
+  > EOF
+
   $ cat > testpackage/symbolimport.py << EOF
   > from __future__ import absolute_import
   > from .unsorted import foo
@@ -150,6 +155,7 @@
   testpackage/requirerelative.py:2: import should be relative: testpackage.unsorted
   testpackage/sortedentries.py:2: imports from testpackage not lexically sorted: bar < foo
   testpackage/stdafterlocal.py:3: stdlib import "os" follows local import: testpackage
+  testpackage/stdlibfrom.py:2: direct symbol import abc from collections
   testpackage/subpackage/levelpriority.py:3: higher-level import should come first: testpackage
   testpackage/subpackage/localimport.py:7: multiple "from .. import" statements
   testpackage/subpackage/localimport.py:8: import should be relative: testpackage.subpackage.levelpriority
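
The new stdlibfrom.py case checks that the import checker flags symbols
imported directly out of stdlib modules. A sketch of the compliant form,
assuming the checker accepts plain module imports (the example body is
illustrative):

    import collections.abc

    # symbols are reached through the module instead of being imported
    # directly, which is what the "direct symbol import" rule steers toward
    def is_mapping(obj):
        return isinstance(obj, collections.abc.Mapping)
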
--- a/tests/test-install.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-install.t	Tue Jan 21 13:14:51 2020 -0500
@@ -162,80 +162,6 @@
     "fsmonitor-watchman": "false",
     "fsmonitor-watchman-error": "warning: Watchman unavailable: watchman exited with code 1",
 
-
-#if test-repo
-  $ . "$TESTDIR/helpers-testrepo.sh"
-
-  $ cat >> wixxml.py << EOF
-  > import os
-  > import subprocess
-  > import sys
-  > import xml.etree.ElementTree as ET
-  > from mercurial import pycompat
-  > 
-  > # MSYS mangles the path if it expands $TESTDIR
-  > testdir = os.environ['TESTDIR']
-  > ns = {'wix' : 'http://schemas.microsoft.com/wix/2006/wi'}
-  > 
-  > def directory(node, relpath):
-  >     '''generator of files in the xml node, rooted at relpath'''
-  >     dirs = node.findall('./{%(wix)s}Directory' % ns)
-  > 
-  >     for d in dirs:
-  >         for subfile in directory(d, relpath + d.attrib['Name'] + '/'):
-  >             yield subfile
-  > 
-  >     files = node.findall('./{%(wix)s}Component/{%(wix)s}File' % ns)
-  > 
-  >     for f in files:
-  >         yield pycompat.sysbytes(relpath + f.attrib['Name'])
-  > 
-  > def hgdirectory(relpath):
-  >     '''generator of tracked files, rooted at relpath'''
-  >     hgdir = "%s/../mercurial" % (testdir)
-  >     args = ['hg', '--cwd', hgdir, 'files', relpath]
-  >     proc = subprocess.Popen(args, stdout=subprocess.PIPE,
-  >                             stderr=subprocess.PIPE)
-  >     output = proc.communicate()[0]
-  > 
-  >     for line in output.splitlines():
-  >         if os.name == 'nt':
-  >             yield line.replace(pycompat.sysbytes(os.sep), b'/')
-  >         else:
-  >             yield line
-  > 
-  > tracked = [f for f in hgdirectory(sys.argv[1])]
-  > 
-  > xml = ET.parse("%s/../contrib/packaging/wix/%s.wxs" % (testdir, sys.argv[1]))
-  > root = xml.getroot()
-  > dir = root.find('.//{%(wix)s}DirectoryRef' % ns)
-  > 
-  > installed = [f for f in directory(dir, '')]
-  > 
-  > print('Not installed:')
-  > for f in sorted(set(tracked) - set(installed)):
-  >     print('  %s' % pycompat.sysstr(f))
-  > 
-  > print('Not tracked:')
-  > for f in sorted(set(installed) - set(tracked)):
-  >     print('  %s' % pycompat.sysstr(f))
-  > EOF
-
-  $ ( testrepohgenv; "$PYTHON" wixxml.py help )
-  Not installed:
-    help/common.txt
-    help/hg-ssh.8.txt
-    help/hg.1.txt
-    help/hgignore.5.txt
-    help/hgrc.5.txt
-  Not tracked:
-
-  $ ( testrepohgenv; "$PYTHON" wixxml.py templates )
-  Not installed:
-  Not tracked:
-
-#endif
-
 Verify that Mercurial is installable with pip. Note that this MUST be
 the last test in this file, because we do some nasty things to the
 shell environment in order to make the virtualenv work reliably.
@@ -255,6 +181,7 @@
 Note: we use this weird path to run pip and hg to avoid platform differences,
 since it's bin on most platforms but Scripts on Windows.
   $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
+    Failed building wheel for mercurial (?)
   $ ./installenv/*/hg debuginstall || cat pip.log
   checking encoding (ascii)...
   checking Python executable (*) (glob)
--- a/tests/test-issue1175.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-issue1175.t	Tue Jan 21 13:14:51 2020 -0500
@@ -82,7 +82,6 @@
   continue: hg graft --continue
   $ hg graft --continue
   grafting 1:5974126fad84 "b1"
-  warning: can't find ancestor for 'b' copied from 'a'!
   $ hg log -f b -T 'changeset:   {rev}:{node|short}\nsummary:     {desc}\n\n'
   changeset:   3:376d30ccffc0
   summary:     b1
--- a/tests/test-largefiles-misc.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-largefiles-misc.t	Tue Jan 21 13:14:51 2020 -0500
@@ -41,7 +41,7 @@
   > EOF
 
   $ hg config extensions
-  *** failed to import extension largefiles from missing.py: [Errno 2] $ENOENT$: 'missing.py'
+  \*\*\* failed to import extension largefiles from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob)
   abort: repository requires features unknown to this Mercurial: largefiles!
   (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
   [255]
--- a/tests/test-lfs-serve-access.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-lfs-serve-access.t	Tue Jan 21 13:14:51 2020 -0500
@@ -210,7 +210,7 @@
   > 
   >     store = repo.svfs.lfslocalblobstore
   >     class badstore(store.__class__):
-  >         def download(self, oid, src):
+  >         def download(self, oid, src, contentlength):
   >             '''Called in the server to handle reading from the client in a
   >             PUT request.'''
   >             origread = src.read
@@ -218,7 +218,7 @@
   >                 # Simulate bad data/checksum failure from the client
   >                 return b'0' * len(origread(nbytes))
   >             src.read = _badread
-  >             super(badstore, self).download(oid, src)
+  >             super(badstore, self).download(oid, src, contentlength)
   > 
   >         def _read(self, vfs, oid, verify):
   >             '''Called in the server to read data for a GET request, and then
@@ -341,19 +341,20 @@
   $LOCALIP - - [$ERRDATE$] HG error:  Traceback (most recent call last): (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      verifies = store.verify(oid) (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
-  $LOCALIP - - [$ERRDATE$] HG error:  *Error: [Errno 5] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
+  $LOCALIP - - [$ERRDATE$] HG error:  *Error: [Errno *] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
   $LOCALIP - - [$ERRDATE$] HG error:   (glob)
   $LOCALIP - - [$ERRDATE$] HG error:  Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
   $LOCALIP - - [$ERRDATE$] HG error:  Traceback (most recent call last): (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      verifies = store.verify(oid) (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
-  $LOCALIP - - [$ERRDATE$] HG error:  *Error: [Errno 5] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
+  $LOCALIP - - [$ERRDATE$] HG error:  *Error: [Errno *] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
   $LOCALIP - - [$ERRDATE$] HG error:   (glob)
   $LOCALIP - - [$ERRDATE$] HG error:  Exception happened while processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
   $LOCALIP - - [$ERRDATE$] HG error:  Traceback (most recent call last): (glob)
-  $LOCALIP - - [$ERRDATE$] HG error:      localstore.download(oid, req.bodyfh) (glob)
-  $LOCALIP - - [$ERRDATE$] HG error:      super(badstore, self).download(oid, src) (glob)
-  $LOCALIP - - [$ERRDATE$] HG error:      _(b'corrupt remote lfs object: %s') % oid (glob)
+  $LOCALIP - - [$ERRDATE$] HG error:      localstore.download(oid, req.bodyfh, req.headers[b'Content-Length'])
+  $LOCALIP - - [$ERRDATE$] HG error:      super(badstore, self).download(oid, src, contentlength)
+  $LOCALIP - - [$ERRDATE$] HG error:      raise LfsCorruptionError( (glob) (py38 !)
+  $LOCALIP - - [$ERRDATE$] HG error:      _(b'corrupt remote lfs object: %s') % oid (glob) (no-py38 !)
   $LOCALIP - - [$ERRDATE$] HG error:  LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (no-py3 !)
   $LOCALIP - - [$ERRDATE$] HG error:  hgext.lfs.blobstore.LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (py3 !)
   $LOCALIP - - [$ERRDATE$] HG error:   (glob)
@@ -363,20 +364,23 @@
       self.do_hgweb()
       for chunk in self.server.application(env, self._start_response):
       for r in self._runwsgi(req, res, repo):
-      rctx, req, res, self.check_perm
+      handled = wireprotoserver.handlewsgirequest( (py38 !)
+      return _processbasictransfer( (py38 !)
+      rctx, req, res, self.check_perm (no-py38 !)
       return func(*(args + a), **kw) (no-py3 !)
-      rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm)
+      rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm) (no-py38 !)
       res.setbodybytes(localstore.read(oid))
       blob = self._read(self.vfs, oid, verify)
       raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
-  *Error: [Errno 5] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error (glob)
+  *Error: [Errno *] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error (glob)
   
   $LOCALIP - - [$ERRDATE$] HG error:  Exception happened while processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
   $LOCALIP - - [$ERRDATE$] HG error:  Traceback (most recent call last): (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      res.setbodybytes(localstore.read(oid)) (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      blob = self._read(self.vfs, oid, verify) (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      blobstore._verify(oid, b'dummy content') (glob)
-  $LOCALIP - - [$ERRDATE$] HG error:      hint=_(b'run hg verify'), (glob)
+  $LOCALIP - - [$ERRDATE$] HG error:      raise LfsCorruptionError( (glob) (py38 !)
+  $LOCALIP - - [$ERRDATE$] HG error:      hint=_(b'run hg verify'), (glob) (no-py38 !)
   $LOCALIP - - [$ERRDATE$] HG error:  LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (no-py3 !)
   $LOCALIP - - [$ERRDATE$] HG error:  hgext.lfs.blobstore.LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (py3 !)
   $LOCALIP - - [$ERRDATE$] HG error:   (glob)
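
The store's download hook now receives the request's Content-Length, so the
server can stop reading exactly at the declared size. A hedged sketch of that
shape, using plain files instead of Mercurial's vfs (this is not
hgext.lfs.blobstore.download itself):

    def download(path, src, contentlength):
        # read at most the declared number of bytes from the client and
        # spool them to disk (sketch; the real store also verifies the hash)
        remaining = int(contentlength)
        with open(path, 'wb') as fp:
            while remaining > 0:
                chunk = src.read(min(remaining, 65536))
                if not chunk:
                    break  # client sent less than it declared
                fp.write(chunk)
                remaining -= len(chunk)
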
--- a/tests/test-lfs.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-lfs.t	Tue Jan 21 13:14:51 2020 -0500
@@ -40,7 +40,7 @@
   > EOF
 
   $ hg config extensions
-  *** failed to import extension lfs from missing.py: [Errno 2] $ENOENT$: 'missing.py'
+  \*\*\* failed to import extension lfs from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob)
   abort: repository requires features unknown to this Mercurial: lfs!
   (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
   [255]
@@ -218,6 +218,15 @@
   R large
   $ hg commit -m 'renames'
 
+  $ hg cat -r . l -T '{rawdata}\n'
+  version https://git-lfs.github.com/spec/v1
+  oid sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
+  size 39
+  x-hg-copy large
+  x-hg-copyrev 2c531e0992ff3107c511b53cb82a91b6436de8b2
+  x-is-binary 0
+  
+
   $ hg files -r . 'set:copied()'
   l
   s
@@ -796,6 +805,65 @@
   $ test -f fromcorrupt/.hg/store/lfs/objects/66/100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
   [1]
 
+Verify will not try to download lfs blobs if told not to process lfs content.
+The extension makes sure that the filelog.renamed() path is taken on a missing
+blob, and the output shows that it isn't fetched.
+
+  $ cat > $TESTTMP/lfsrename.py <<EOF
+  > import sys
+  > 
+  > from mercurial import (
+  >     exthelper,
+  >     pycompat,
+  > )
+  > 
+  > from hgext.lfs import (
+  >     pointer,
+  >     wrapper,
+  > )
+  > 
+  > eh = exthelper.exthelper()
+  > uisetup = eh.finaluisetup
+  > 
+  > @eh.wrapfunction(wrapper, b'filelogrenamed')
+  > def filelogrenamed(orig, orig1, self, node):
+  >     ret = orig(orig1, self, node)
+  >     if wrapper._islfs(self._revlog, node) and ret:
+  >         rawtext = self._revlog.rawdata(node)
+  >         metadata = pointer.deserialize(rawtext)
+  >         print('lfs blob %s renamed %s -> %s'
+  >               % (pycompat.sysstr(metadata[b'oid']),
+  >                  pycompat.sysstr(ret[0]),
+  >                  pycompat.fsdecode(self._revlog.filename)))
+  >         sys.stdout.flush()
+  >     return ret
+  > EOF
+
+  $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v --no-lfs \
+  >                   --config extensions.x=$TESTTMP/lfsrename.py
+  repository uses revlog format 1
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
+  lfs blob sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e renamed large -> l
+  checked 5 changesets with 10 changes to 4 files
+
+Verify will not try to download lfs blobs if told not to by the config option
+
+  $ hg -R fromcorrupt --config lfs.usercache=emptycache verify -v \
+  >                   --config verify.skipflags=8192 \
+  >                   --config extensions.x=$TESTTMP/lfsrename.py
+  repository uses revlog format 1
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
+  lfs blob sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e renamed large -> l
+  checked 5 changesets with 10 changes to 4 files
+
 Verify will copy/link all lfs objects into the local store that aren't already
 present.  Bypass the corrupted usercache to show that verify works when fed by
 the (uncorrupted) remote store.
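
The {rawdata} output added earlier in this file shows the key/value layout of
an lfs pointer, which the test extension decodes with pointer.deserialize. A
minimal parsing sketch over that layout (illustrative; not the extension's
actual parser):

    def parse_pointer(rawtext):
        # split pointer lines such as "oid sha256:..." or "size 39" into a
        # bytes-keyed dict, mirroring the {rawdata} layout shown above
        fields = {}
        for line in rawtext.splitlines():
            if line:
                key, _, value = line.partition(b' ')
                fields[key] = value
        return fields
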
--- a/tests/test-linelog.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-linelog.py	Tue Jan 21 13:14:51 2020 -0500
@@ -172,7 +172,7 @@
                 ll.replacelines_vec(rev, a1, a2, blines)
             else:
                 ll.replacelines(rev, a1, a2, b1, b2)
-            ar = ll.annotate(rev)
+            ll.annotate(rev)
             self.assertEqual(ll.annotateresult, lines)
         # Verify we can get back these states by annotating each rev
         for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits(
--- a/tests/test-lock.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-lock.py	Tue Jan 21 13:14:51 2020 -0500
@@ -65,7 +65,7 @@
     def releasefn(self):
         self._releasecalled = True
 
-    def postreleasefn(self):
+    def postreleasefn(self, success):
         self._postreleasecalled = True
 
     def assertacquirecalled(self, called):
--- a/tests/test-log-linerange.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-log-linerange.t	Tue Jan 21 13:14:51 2020 -0500
@@ -993,6 +993,112 @@
 
   $ hg revert -a -C -q
 
+Copies.
+
+  $ hg copy baz bbaz
+  $ sed 's/6/6+/' bbaz > bbaz.new
+  $ mv bbaz.new bbaz
+  $ hg commit -m 'cp baz bbaz; 6-6+'
+  $ hg diff -c .
+  diff --git a/dir/baz b/dir/bbaz
+  copy from dir/baz
+  copy to dir/bbaz
+  --- a/dir/baz
+  +++ b/dir/bbaz
+  @@ -7,7 +7,7 @@
+   3+
+   4
+   5
+  -6
+  +6+
+   7
+   8
+   9
+  $ hg log --copies -f -L bbaz,10:11 -p
+  changeset:   10:91a3d3b6c546
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     cp baz bbaz; 6-6+
+  
+  diff --git a/dir/baz b/dir/bbaz
+  copy from dir/baz
+  copy to dir/bbaz
+  --- a/dir/baz
+  +++ b/dir/bbaz
+  @@ -7,7 +7,7 @@
+   3+
+   4
+   5
+  -6
+  +6+
+   7
+   8
+   9
+  
+  changeset:   3:730a61fbaecf
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     to 11
+  
+  diff --git a/foo b/foo
+  --- a/foo
+  +++ b/foo
+  @@ -6,3 +6,10 @@
+   2+
+   3
+   4
+  +5
+  +6
+  +7
+  +8
+  +9
+  +10
+  +11
+  
+  $ hg log -f -L bbaz,10:11 -p
+  changeset:   10:91a3d3b6c546
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     cp baz bbaz; 6-6+
+  
+  diff --git a/dir/baz b/dir/bbaz
+  copy from dir/baz
+  copy to dir/bbaz
+  --- a/dir/baz
+  +++ b/dir/bbaz
+  @@ -7,7 +7,7 @@
+   3+
+   4
+   5
+  -6
+  +6+
+   7
+   8
+   9
+  
+  changeset:   3:730a61fbaecf
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     to 11
+  
+  diff --git a/foo b/foo
+  --- a/foo
+  +++ b/foo
+  @@ -6,3 +6,10 @@
+   2+
+   3
+   4
+  +5
+  +6
+  +7
+  +8
+  +9
+  +10
+  +11
+  
+
 Binary files work but without diff hunks filtering.
 (Checking w/ and w/o diff.git option.)
 
@@ -1000,7 +1106,7 @@
   $ hg add binary
   $ hg ci -m 'add a binary file' --quiet
   $ hg log -f -L binary,1:2 -p
-  changeset:   10:c96381c229df
+  changeset:   11:dc865b608edf
   tag:         tip
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
@@ -1015,13 +1121,13 @@
   
   
   $ hg log -f -L binary,1:2 -p --config diff.git=false
-  changeset:   10:c96381c229df
+  changeset:   11:dc865b608edf
   tag:         tip
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     add a binary file
   
-  diff -r 6af29c3a778f -r c96381c229df dir/binary
+  diff -r 91a3d3b6c546 -r dc865b608edf dir/binary
   Binary file dir/binary has changed
   
 
--- a/tests/test-manifest.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-manifest.py	Tue Jan 21 13:14:51 2020 -0500
@@ -9,6 +9,7 @@
 from mercurial import (
     manifest as manifestmod,
     match as matchmod,
+    util,
 )
 
 EMTPY_MANIFEST = b''
@@ -169,7 +170,7 @@
         m[b'foo'] = want + b'+'
         self.assertEqual(want, m[b'foo'])
         # make sure the suffix survives a copy
-        match = matchmod.match(b'', b'', [b're:foo'])
+        match = matchmod.match(util.localpath(b'/repo'), b'', [b're:foo'])
         m2 = m.matches(match)
         self.assertEqual(want, m2[b'foo'])
         self.assertEqual(1, len(m2))
@@ -186,7 +187,7 @@
 
     def testMatchException(self):
         m = self.parsemanifest(A_SHORT_MANIFEST)
-        match = matchmod.match(b'', b'', [b're:.*'])
+        match = matchmod.match(util.localpath(b'/repo'), b'', [b're:.*'])
 
         def filt(path):
             if path == b'foo':
@@ -328,7 +329,9 @@
         actually exist.'''
         m = self.parsemanifest(A_DEEPER_MANIFEST)
 
-        match = matchmod.match(b'/', b'', [b'a/f'], default=b'relpath')
+        match = matchmod.match(
+            util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
+        )
         m2 = m.matches(match)
 
         self.assertEqual([], m2.keys())
@@ -348,7 +351,7 @@
         '''Tests matches() for what should be a full match.'''
         m = self.parsemanifest(A_DEEPER_MANIFEST)
 
-        match = matchmod.match(b'/', b'', [b''])
+        match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
         m2 = m.matches(match)
 
         self.assertEqual(m.keys(), m2.keys())
@@ -358,7 +361,9 @@
         match against all files within said directory.'''
         m = self.parsemanifest(A_DEEPER_MANIFEST)
 
-        match = matchmod.match(b'/', b'', [b'a/b'], default=b'relpath')
+        match = matchmod.match(
+            util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
+        )
         m2 = m.matches(match)
 
         self.assertEqual(
@@ -392,7 +397,9 @@
         when not in the root directory.'''
         m = self.parsemanifest(A_DEEPER_MANIFEST)
 
-        match = matchmod.match(b'/', b'a/b', [b'.'], default=b'relpath')
+        match = matchmod.match(
+            util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
+        )
         m2 = m.matches(match)
 
         self.assertEqual(
@@ -415,7 +422,7 @@
         deeper than the specified directory.'''
         m = self.parsemanifest(A_DEEPER_MANIFEST)
 
-        match = matchmod.match(b'/', b'', [b'a/b/*/*.txt'])
+        match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
         m2 = m.matches(match)
 
         self.assertEqual(
@@ -467,7 +474,7 @@
             sorted(dirs),
         )
 
-        match = matchmod.match(b'/', b'', [b'path:a/b/'])
+        match = matchmod.match(util.localpath(b'/repo'), b'', [b'path:a/b/'])
         dirs = [s._dir for s in m.walksubtrees(matcher=match)]
         self.assertEqual(sorted([b'a/b/', b'a/b/c/', b'a/b/d/']), sorted(dirs))
 
--- a/tests/test-manifest.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-manifest.t	Tue Jan 21 13:14:51 2020 -0500
@@ -276,3 +276,103 @@
   crosschecking files in changesets and manifests
   checking files
   checked 2 changesets with 9 changes to 9 files
+  $ cd ..
+
+Test manifest cache interaction with shares
+===========================================
+
+  $ echo '[extensions]' >> $HGRCPATH
+  $ echo 'share=' >> $HGRCPATH
+
+creating some history
+
+  $ hg init share-source
+  $ hg debugbuilddag .+10 -n -R share-source
+  $ hg log --debug -r . -R share-source | grep 'manifest:'
+  manifest:    -1:0000000000000000000000000000000000000000
+  $ hg log -r . -R share-source
+  changeset:   -1:000000000000
+  user:        
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  
+  $ hg debugmanifestfulltextcache -R share-source
+  cache contains 4 manifest entries, in order of most to least recent:
+  id: b264454d7033405774b9f353b9b37a082c1a8fba, size 496 bytes
+  id: c6e7b359cbbb5469e98f35acd73ac4757989c4d8, size 450 bytes
+  id: 8de636143b0acc5236cb47ca914bd482d82e6f35, size 405 bytes
+  id: 7d32499319983d90f97ca02a6c2057a1030bebbb, size 360 bytes
+  total cache data size 1.76 KB, on-disk 1.76 KB
+  $ hg -R share-source update 1
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg debugmanifestfulltextcache -R share-source
+  cache contains 4 manifest entries, in order of most to least recent:
+  id: fffc37b38c401b1ab4f8b99da4b72325e31b985f, size 90 bytes
+  id: b264454d7033405774b9f353b9b37a082c1a8fba, size 496 bytes
+  id: c6e7b359cbbb5469e98f35acd73ac4757989c4d8, size 450 bytes
+  id: 8de636143b0acc5236cb47ca914bd482d82e6f35, size 405 bytes
+  total cache data size 1.50 KB, on-disk 1.50 KB
+
+making a share out of it; the new share should have its manifest cache updated
+
+  $ hg share share-source share-dest
+  updating working directory
+  11 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg log --debug -r . -R share-dest | grep 'manifest:'
+  manifest:    10:b264454d7033405774b9f353b9b37a082c1a8fba
+  $ hg debugmanifestfulltextcache -R share-dest
+  cache contains 1 manifest entries, in order of most to least recent:
+  id: b264454d7033405774b9f353b9b37a082c1a8fba, size 496 bytes
+  total cache data size 520 bytes, on-disk 520 bytes
+
+updates on either side should only affect the target share
+
+  $ hg update -R share-dest 4
+  0 files updated, 0 files merged, 6 files removed, 0 files unresolved
+  $ hg log --debug -r . -R share-dest | grep 'manifest:'
+  manifest:    4:d45ead487afec2588272fcec88a25413c0ec7dc8
+  $ hg debugmanifestfulltextcache -R share-dest
+  cache contains 2 manifest entries, in order of most to least recent:
+  id: d45ead487afec2588272fcec88a25413c0ec7dc8, size 225 bytes
+  id: b264454d7033405774b9f353b9b37a082c1a8fba, size 496 bytes
+  total cache data size 769 bytes, on-disk 769 bytes
+  $ hg debugmanifestfulltextcache -R share-source
+  cache contains 4 manifest entries, in order of most to least recent:
+  id: fffc37b38c401b1ab4f8b99da4b72325e31b985f, size 90 bytes
+  id: b264454d7033405774b9f353b9b37a082c1a8fba, size 496 bytes
+  id: c6e7b359cbbb5469e98f35acd73ac4757989c4d8, size 450 bytes
+  id: 8de636143b0acc5236cb47ca914bd482d82e6f35, size 405 bytes
+  total cache data size 1.50 KB, on-disk 1.50 KB
+  $ hg update -R share-source 7
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg log --debug -r . -R share-source | grep 'manifest:'
+  manifest:    7:7d32499319983d90f97ca02a6c2057a1030bebbb
+  $ hg debugmanifestfulltextcache -R share-dest
+  cache contains 2 manifest entries, in order of most to least recent:
+  id: d45ead487afec2588272fcec88a25413c0ec7dc8, size 225 bytes
+  id: b264454d7033405774b9f353b9b37a082c1a8fba, size 496 bytes
+  total cache data size 769 bytes, on-disk 769 bytes
+  $ hg debugmanifestfulltextcache -R share-source
+  cache contains 4 manifest entries, in order of most to least recent:
+  id: 7d32499319983d90f97ca02a6c2057a1030bebbb, size 360 bytes
+  id: fffc37b38c401b1ab4f8b99da4b72325e31b985f, size 90 bytes
+  id: b264454d7033405774b9f353b9b37a082c1a8fba, size 496 bytes
+  id: c6e7b359cbbb5469e98f35acd73ac4757989c4d8, size 450 bytes
+  total cache data size 1.46 KB, on-disk 1.46 KB
+  $ hg update -R share-dest 8
+  4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg log --debug -r . -R share-dest | grep 'manifest:'
+  manifest:    8:8de636143b0acc5236cb47ca914bd482d82e6f35
+  $ hg debugmanifestfulltextcache -R share-dest
+  cache contains 3 manifest entries, in order of most to least recent:
+  id: 8de636143b0acc5236cb47ca914bd482d82e6f35, size 405 bytes
+  id: d45ead487afec2588272fcec88a25413c0ec7dc8, size 225 bytes
+  id: b264454d7033405774b9f353b9b37a082c1a8fba, size 496 bytes
+  total cache data size 1.17 KB, on-disk 1.17 KB
+  $ hg debugmanifestfulltextcache -R share-source
+  cache contains 4 manifest entries, in order of most to least recent:
+  id: 7d32499319983d90f97ca02a6c2057a1030bebbb, size 360 bytes
+  id: fffc37b38c401b1ab4f8b99da4b72325e31b985f, size 90 bytes
+  id: b264454d7033405774b9f353b9b37a082c1a8fba, size 496 bytes
+  id: c6e7b359cbbb5469e98f35acd73ac4757989c4d8, size 450 bytes
+  total cache data size 1.46 KB, on-disk 1.46 KB
+
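
The debug output lists cache entries from most to least recent and reports a
total byte size, so eviction is recency-ordered and size-bounded. An
illustrative sketch of that discipline (not Mercurial's manifestfulltextcache):

    import collections

    class sizeboundedcache(object):
        """recency-ordered cache evicting oldest entries past a byte budget"""

        def __init__(self, maxbytes):
            self._data = collections.OrderedDict()
            self._maxbytes = maxbytes

        def __getitem__(self, key):
            value = self._data.pop(key)
            self._data[key] = value  # re-insert to mark as most recent
            return value

        def __setitem__(self, key, value):
            self._data.pop(key, None)
            self._data[key] = value
            while sum(len(v) for v in self._data.values()) > self._maxbytes:
                self._data.popitem(last=False)  # drop the least recent entry
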
--- a/tests/test-match.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-match.py	Tue Jan 21 13:14:51 2020 -0500
@@ -10,6 +10,9 @@
 )
 
 
+noop_auditor = lambda name: None
+
+
 class BaseMatcherTests(unittest.TestCase):
     def testVisitdir(self):
         m = matchmod.basematcher()
@@ -63,7 +66,9 @@
 
 class PatternMatcherTests(unittest.TestCase):
     def testVisitdirPrefix(self):
-        m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'path:dir/subdir']
+        )
         assert isinstance(m, matchmod.patternmatcher)
         self.assertTrue(m.visitdir(b''))
         self.assertTrue(m.visitdir(b'dir'))
@@ -73,7 +78,9 @@
         self.assertFalse(m.visitdir(b'folder'))
 
     def testVisitchildrensetPrefix(self):
-        m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'path:dir/subdir']
+        )
         assert isinstance(m, matchmod.patternmatcher)
         self.assertEqual(m.visitchildrenset(b''), b'this')
         self.assertEqual(m.visitchildrenset(b'dir'), b'this')
@@ -83,7 +90,9 @@
         self.assertEqual(m.visitchildrenset(b'folder'), set())
 
     def testVisitdirRootfilesin(self):
-        m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'rootfilesin:dir/subdir'],
+        )
         assert isinstance(m, matchmod.patternmatcher)
         self.assertFalse(m.visitdir(b'dir/subdir/x'))
         self.assertFalse(m.visitdir(b'folder'))
@@ -93,7 +102,9 @@
         self.assertFalse(m.visitdir(b'dir/subdir'))
 
     def testVisitchildrensetRootfilesin(self):
-        m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'rootfilesin:dir/subdir'],
+        )
         assert isinstance(m, matchmod.patternmatcher)
         self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
         self.assertEqual(m.visitchildrenset(b'folder'), set())
@@ -104,7 +115,9 @@
         self.assertEqual(m.visitchildrenset(b'dir/subdir'), set())
 
     def testVisitdirGlob(self):
-        m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'glob:dir/z*']
+        )
         assert isinstance(m, matchmod.patternmatcher)
         self.assertTrue(m.visitdir(b''))
         self.assertTrue(m.visitdir(b'dir'))
@@ -114,7 +127,9 @@
         self.assertTrue(m.visitdir(b'dir/subdir/x'))
 
     def testVisitchildrensetGlob(self):
-        m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'glob:dir/z*']
+        )
         assert isinstance(m, matchmod.patternmatcher)
         self.assertEqual(m.visitchildrenset(b''), b'this')
         self.assertEqual(m.visitchildrenset(b'folder'), set())
@@ -126,7 +141,9 @@
 
 class IncludeMatcherTests(unittest.TestCase):
     def testVisitdirPrefix(self):
-        m = matchmod.match(b'x', b'', include=[b'path:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         assert isinstance(m, matchmod.includematcher)
         self.assertTrue(m.visitdir(b''))
         self.assertTrue(m.visitdir(b'dir'))
@@ -136,7 +153,9 @@
         self.assertFalse(m.visitdir(b'folder'))
 
     def testVisitchildrensetPrefix(self):
-        m = matchmod.match(b'x', b'', include=[b'path:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         assert isinstance(m, matchmod.includematcher)
         self.assertEqual(m.visitchildrenset(b''), {b'dir'})
         self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
@@ -146,7 +165,9 @@
         self.assertEqual(m.visitchildrenset(b'folder'), set())
 
     def testVisitdirRootfilesin(self):
-        m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'rootfilesin:dir/subdir']
+        )
         assert isinstance(m, matchmod.includematcher)
         self.assertTrue(m.visitdir(b''))
         self.assertTrue(m.visitdir(b'dir'))
@@ -155,7 +176,9 @@
         self.assertFalse(m.visitdir(b'folder'))
 
     def testVisitchildrensetRootfilesin(self):
-        m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'rootfilesin:dir/subdir']
+        )
         assert isinstance(m, matchmod.includematcher)
         self.assertEqual(m.visitchildrenset(b''), {b'dir'})
         self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
@@ -164,7 +187,9 @@
         self.assertEqual(m.visitchildrenset(b'folder'), set())
 
     def testVisitdirGlob(self):
-        m = matchmod.match(b'x', b'', include=[b'glob:dir/z*'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'glob:dir/z*']
+        )
         assert isinstance(m, matchmod.includematcher)
         self.assertTrue(m.visitdir(b''))
         self.assertTrue(m.visitdir(b'dir'))
@@ -174,7 +199,9 @@
         self.assertTrue(m.visitdir(b'dir/subdir/x'))
 
     def testVisitchildrensetGlob(self):
-        m = matchmod.match(b'x', b'', include=[b'glob:dir/z*'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'glob:dir/z*']
+        )
         assert isinstance(m, matchmod.includematcher)
         self.assertEqual(m.visitchildrenset(b''), {b'dir'})
         self.assertEqual(m.visitchildrenset(b'folder'), set())
@@ -286,7 +313,9 @@
 
     def testVisitdirM2SubdirPrefix(self):
         m1 = matchmod.alwaysmatcher()
-        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'path:dir/subdir']
+        )
         dm = matchmod.differencematcher(m1, m2)
         self.assertEqual(dm.visitdir(b''), True)
         self.assertEqual(dm.visitdir(b'dir'), True)
@@ -301,7 +330,9 @@
 
     def testVisitchildrensetM2SubdirPrefix(self):
         m1 = matchmod.alwaysmatcher()
-        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'path:dir/subdir']
+        )
         dm = matchmod.differencematcher(m1, m2)
         self.assertEqual(dm.visitchildrenset(b''), b'this')
         self.assertEqual(dm.visitchildrenset(b'dir'), b'this')
@@ -317,8 +348,12 @@
     # We're using includematcher instead of patterns because it behaves slightly
     # better (giving narrower results) than patternmatcher.
     def testVisitdirIncludeInclude(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'rootfilesin:dir']
+        )
         dm = matchmod.differencematcher(m1, m2)
         self.assertEqual(dm.visitdir(b''), True)
         self.assertEqual(dm.visitdir(b'dir'), True)
@@ -332,8 +367,12 @@
         self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
 
     def testVisitchildrensetIncludeInclude(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'rootfilesin:dir']
+        )
         dm = matchmod.differencematcher(m1, m2)
         self.assertEqual(dm.visitchildrenset(b''), {b'dir'})
         self.assertEqual(dm.visitchildrenset(b'dir'), {b'subdir'})
@@ -402,7 +441,9 @@
 
     def testVisitdirM2SubdirPrefix(self):
         m1 = matchmod.alwaysmatcher()
-        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'path:dir/subdir']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         self.assertEqual(im.visitdir(b''), True)
         self.assertEqual(im.visitdir(b'dir'), True)
@@ -417,7 +458,9 @@
 
     def testVisitchildrensetM2SubdirPrefix(self):
         m1 = matchmod.alwaysmatcher()
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         self.assertEqual(im.visitchildrenset(b''), {b'dir'})
         self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
@@ -431,8 +474,12 @@
     # We're using includematcher instead of patterns because it behaves slightly
     # better (giving narrower results) than patternmatcher.
     def testVisitdirIncludeInclude(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'rootfilesin:dir']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         self.assertEqual(im.visitdir(b''), True)
         self.assertEqual(im.visitdir(b'dir'), True)
@@ -443,8 +490,12 @@
         self.assertFalse(im.visitdir(b'dir/subdir/x'))
 
     def testVisitchildrensetIncludeInclude(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'rootfilesin:dir']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         self.assertEqual(im.visitchildrenset(b''), {b'dir'})
         self.assertEqual(im.visitchildrenset(b'dir'), b'this')
@@ -457,8 +508,12 @@
     # We're using includematcher instead of patterns because it behaves slightly
     # better (giving narrower results) than patternmatcher.
     def testVisitdirIncludeInclude2(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:folder']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         # FIXME: is True correct here?
         self.assertEqual(im.visitdir(b''), True)
@@ -470,8 +525,12 @@
         self.assertFalse(im.visitdir(b'dir/subdir/x'))
 
     def testVisitchildrensetIncludeInclude2(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:folder']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         # FIXME: is set() correct here?
         self.assertEqual(im.visitchildrenset(b''), set())
@@ -485,8 +544,12 @@
     # We're using includematcher instead of patterns because it behaves slightly
     # better (giving narrower results) than patternmatcher.
     def testVisitdirIncludeInclude3(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/x']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         self.assertEqual(im.visitdir(b''), True)
         self.assertEqual(im.visitdir(b'dir'), True)
@@ -498,8 +561,12 @@
         self.assertEqual(im.visitdir(b'dir/subdir/x'), True)
 
     def testVisitchildrensetIncludeInclude3(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/x']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         self.assertEqual(im.visitchildrenset(b''), {b'dir'})
         self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
@@ -513,8 +580,12 @@
     # We're using includematcher instead of patterns because it behaves slightly
     # better (giving narrower results) than patternmatcher.
     def testVisitdirIncludeInclude4(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/x']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/z']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         # OPT: these next three could probably be False as well.
         self.assertEqual(im.visitdir(b''), True)
@@ -526,8 +597,12 @@
         self.assertFalse(im.visitdir(b'dir/subdir/x'))
 
     def testVisitchildrensetIncludeInclude4(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/x']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/z']
+        )
         im = matchmod.intersectmatchers(m1, m2)
         # OPT: these next two could probably be set() as well.
         self.assertEqual(im.visitchildrenset(b''), {b'dir'})
@@ -620,7 +695,9 @@
 
     def testVisitdirM2SubdirPrefix(self):
         m1 = matchmod.alwaysmatcher()
-        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', patterns=[b'path:dir/subdir']
+        )
         um = matchmod.unionmatcher([m1, m2])
         self.assertEqual(um.visitdir(b''), b'all')
         self.assertEqual(um.visitdir(b'dir'), b'all')
@@ -632,7 +709,9 @@
 
     def testVisitchildrensetM2SubdirPrefix(self):
         m1 = matchmod.alwaysmatcher()
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         um = matchmod.unionmatcher([m1, m2])
         self.assertEqual(um.visitchildrenset(b''), b'all')
         self.assertEqual(um.visitchildrenset(b'dir'), b'all')
@@ -645,8 +724,12 @@
     # We're using includematcher instead of patterns because it behaves slightly
     # better (giving narrower results) than patternmatcher.
     def testVisitdirIncludeInclude(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'rootfilesin:dir']
+        )
         um = matchmod.unionmatcher([m1, m2])
         self.assertEqual(um.visitdir(b''), True)
         self.assertEqual(um.visitdir(b'dir'), True)
@@ -658,8 +741,12 @@
         self.assertEqual(um.visitdir(b'dir/subdir/x'), True)
 
     def testVisitchildrensetIncludeInclude(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'rootfilesin:dir']
+        )
         um = matchmod.unionmatcher([m1, m2])
         self.assertEqual(um.visitchildrenset(b''), {b'dir'})
         self.assertEqual(um.visitchildrenset(b'dir'), b'this')
@@ -673,8 +760,12 @@
     # We're using includematcher instead of patterns because it behaves slightly
     # better (giving narrower results) than patternmatcher.
     def testVisitdirIncludeInclude2(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:folder']
+        )
         um = matchmod.unionmatcher([m1, m2])
         self.assertEqual(um.visitdir(b''), True)
         self.assertEqual(um.visitdir(b'dir'), True)
@@ -686,8 +777,12 @@
         self.assertEqual(um.visitdir(b'dir/subdir/x'), True)
 
     def testVisitchildrensetIncludeInclude2(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
-        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:folder']
+        )
         um = matchmod.unionmatcher([m1, m2])
         self.assertEqual(um.visitchildrenset(b''), {b'folder', b'dir'})
         self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
@@ -701,8 +796,12 @@
     # We're using includematcher instead of patterns because it behaves slightly
     # better (giving narrower results) than patternmatcher.
     def testVisitdirIncludeInclude3(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/x']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         um = matchmod.unionmatcher([m1, m2])
         self.assertEqual(um.visitdir(b''), True)
         self.assertEqual(um.visitdir(b'dir'), True)
@@ -714,8 +813,12 @@
         self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
 
     def testVisitchildrensetIncludeInclude3(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/x']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         um = matchmod.unionmatcher([m1, m2])
         self.assertEqual(um.visitchildrenset(b''), {b'dir'})
         self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
@@ -729,8 +832,12 @@
     # We're using includematcher instead of patterns because it behaves slightly
     # better (giving narrower results) than patternmatcher.
     def testVisitdirIncludeInclude4(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/x']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/z']
+        )
         um = matchmod.unionmatcher([m1, m2])
         # OPT: these next three could probably be False as well.
         self.assertEqual(um.visitdir(b''), True)
@@ -742,8 +849,12 @@
         self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
 
     def testVisitchildrensetIncludeInclude4(self):
-        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
-        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        m1 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/x']
+        )
+        m2 = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir/z']
+        )
         um = matchmod.unionmatcher([m1, m2])
         self.assertEqual(um.visitchildrenset(b''), {b'dir'})
         self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
@@ -756,7 +867,9 @@
 
 class SubdirMatcherTests(unittest.TestCase):
     def testVisitdir(self):
-        m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         sm = matchmod.subdirmatcher(b'dir', m)
 
         self.assertEqual(sm.visitdir(b''), True)
@@ -767,7 +880,9 @@
         self.assertFalse(sm.visitdir(b'foo'))
 
     def testVisitchildrenset(self):
-        m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m = matchmod.match(
+            util.localpath(b'/repo'), b'', include=[b'path:dir/subdir']
+        )
         sm = matchmod.subdirmatcher(b'dir', m)
 
         self.assertEqual(sm.visitchildrenset(b''), {b'subdir'})
@@ -781,7 +896,10 @@
 class PrefixdirMatcherTests(unittest.TestCase):
     def testVisitdir(self):
         m = matchmod.match(
-            util.localpath(b'root/d'), b'e/f', [b'../a.txt', b'b.txt']
+            util.localpath(b'/root/d'),
+            b'e/f',
+            [b'../a.txt', b'b.txt'],
+            auditor=noop_auditor,
         )
         pm = matchmod.prefixdirmatcher(b'd', m)
 
@@ -814,7 +932,10 @@
 
     def testVisitchildrenset(self):
         m = matchmod.match(
-            util.localpath(b'root/d'), b'e/f', [b'../a.txt', b'b.txt']
+            util.localpath(b'/root/d'),
+            b'e/f',
+            [b'../a.txt', b'b.txt'],
+            auditor=noop_auditor,
         )
         pm = matchmod.prefixdirmatcher(b'd', m)
 
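
The matcher updates above all converge on one shape: a concrete root such as
util.localpath(b'/repo') instead of b'', plus an explicit auditor where
relative patterns are parsed. A minimal sketch of that construction, assuming
matchmod.match() takes (root, cwd, ...) with an optional auditor keyword and
that noop_auditor is a test-local stub:

    from mercurial import match as matchmod, util

    def noop_auditor(path):
        # test-local stub: accept any path without auditing it
        pass

    m = matchmod.match(
        util.localpath(b'/repo'),  # concrete root instead of b''
        b'',
        include=[b'path:dir/subdir'],
        auditor=noop_auditor,
    )
    assert m.visitdir(b'dir')
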
--- a/tests/test-merge-default.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-merge-default.t	Tue Jan 21 13:14:51 2020 -0500
@@ -44,9 +44,10 @@
   (run 'hg heads .' to see heads, specify rev with -r)
   [255]
 
-Should succeed:
+Should succeed (we're specifying commands.merge.require-rev=True just to test
+that it allows merge to succeed if we specify a revision):
 
-  $ hg merge 2
+  $ hg merge 2 --config commands.merge.require-rev=True
   0 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg id -Tjson
@@ -63,6 +64,13 @@
   ]
   $ hg commit -mm1
 
+Should fail because we didn't specify a revision (even though it would have
+succeeded without this):
+
+  $ hg merge --config commands.merge.require-rev=True
+  abort: configuration requires specifying revision to merge with
+  [255]
+
 Should succeed - 2 heads:
 
   $ hg merge -P
@@ -88,6 +96,13 @@
    }
   ]
 
+Should fail because we didn't specify a revision (it would have failed anyway
+due to being at tip, but this check comes first):
+
+  $ hg merge --config commands.merge.require-rev=True
+  abort: configuration requires specifying revision to merge with
+  [255]
+
 Should fail because at tip:
 
   $ hg merge
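
The commands.merge.require-rev knob exercised above makes `hg merge` refuse
to pick a destination implicitly. A minimal sketch of the gate, assuming it
runs before destination guessing (the helper name is hypothetical):

    from mercurial import error
    from mercurial.i18n import _

    def _checkrequirerev(ui, node):
        # hypothetical helper: abort when no revision was given but the
        # configuration requires one, matching the abort message above
        if node is None and ui.configbool(b'commands', b'merge.require-rev'):
            raise error.Abort(
                _(b'configuration requires specifying revision to merge with')
            )
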
--- a/tests/test-merge-tools.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-merge-tools.t	Tue Jan 21 13:14:51 2020 -0500
@@ -4,6 +4,8 @@
   $ cat >> $HGRCPATH << EOF
   > [ui]
   > merge=
+  > [commands]
+  > merge.require-rev=True
   > EOF
   $ hg init repo
   $ cd repo
@@ -1908,6 +1910,7 @@
   0 files updated, 0 files merged, 0 files removed, 1 files unresolved
   use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
   [1]
+(Testing that commands.merge.require-rev doesn't break --abort)
   $ hg merge --abort -q
 
 (for ui.merge, ignored unintentionally)
--- a/tests/test-parseindex2.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-parseindex2.py	Tue Jan 21 13:14:51 2020 -0500
@@ -20,7 +20,7 @@
     pycompat,
 )
 
-parsers = policy.importmod(r'parsers')
+parsers = policy.importmod('parsers')
 
 # original python implementation
 def gettype(q):
@@ -247,6 +247,34 @@
         got = index[-1]
         self.assertEqual(want, got)  # no inline data
 
+    def testdelitemwithoutnodetree(self):
+        index, _junk = parsers.parse_index2(data_non_inlined, False)
+
+        def hexrev(rev):
+            if rev == nullrev:
+                return b'\xff\xff\xff\xff'
+            else:
+                return nodemod.bin('%08x' % rev)
+
+        def appendrev(p1, p2=nullrev):
+            # nodes won't matter for this test; let's just make sure
+            # they don't collide. The other data doesn't matter either.
+            node = hexrev(p1) + hexrev(p2) + b'.' * 12
+            index.append((0, 0, 12, 1, 34, p1, p2, node))
+
+        appendrev(4)
+        appendrev(5)
+        appendrev(6)
+        self.assertEqual(len(index), 7)
+
+        del index[1:-1]
+
+        # assertions that failed before correction
+        self.assertEqual(len(index), 1)  # was 4
+        headrevs = getattr(index, 'headrevs', None)
+        if headrevs is not None:  # not implemented in pure
+            self.assertEqual(index.headrevs(), [0])  # gave ValueError
+
 
 if __name__ == '__main__':
     import silenttestrunner
--- a/tests/test-phabricator.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-phabricator.t	Tue Jan 21 13:14:51 2020 -0500
@@ -20,3 +20,227 @@
   > hgphab.phabtoken = cli-hahayouwish
   > EOF
   $ VCR="$TESTDIR/phabricator"
+
+Errors are handled reasonably. We override the phabtoken here so that
+when you're developing changes to phabricator.py you can edit the
+config above to use a real token without having to edit this test.
+  $ hg phabread --config auth.hgphab.phabtoken=cli-notavalidtoken \
+  >  --test-vcr "$VCR/phabread-conduit-error.json" D4480 | head
+  abort: Conduit Error (ERR-INVALID-AUTH): API token "cli-notavalidtoken" has the wrong length. API tokens should be 32 characters long.
+
+Basic phabread:
+  $ hg phabread --test-vcr "$VCR/phabread-4480.json" D4480 | head
+  # HG changeset patch
+  # Date 1536771503 0
+  # Parent  a5de21c9e3703f8e8eb064bd7d893ff2f703c66a
+  exchangev2: start to implement pull with wire protocol v2
+  
+  Wire protocol version 2 will take a substantially different
+  approach to exchange than version 1 (at least as far as pulling
+  is concerned).
+  
+  This commit establishes a new exchangev2 module for holding
+
+phabupdate with an accept:
+  $ hg phabupdate --accept D4564 \
+  > -m 'I think I like where this is headed. Will read rest of series later.'\
+  >  --test-vcr "$VCR/accept-4564.json"
+  abort: Conduit Error (ERR-CONDUIT-CORE): Validation errors:
+    - You can not accept this revision because it has already been closed. Only open revisions can be accepted.
+  [255]
+  $ hg phabupdate --accept D7913 -m 'LGTM' --test-vcr "$VCR/accept-7913.json"
+
+Create a differential diff:
+  $ HGENCODING=utf-8; export HGENCODING
+  $ echo alpha > alpha
+  $ hg ci --addremove -m 'create alpha for phabricator test €'
+  adding alpha
+  $ hg phabsend -r . --test-vcr "$VCR/phabsend-create-alpha.json"
+  D7915 - created - d386117f30e6: create alpha for phabricator test \xe2\x82\xac (esc)
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/d386117f30e6-24ffe649-phabsend.hg
+  $ echo more >> alpha
+  $ HGEDITOR=true hg ci --amend
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/347bf67801e5-3bf313e4-amend.hg
+  $ echo beta > beta
+  $ hg ci --addremove -m 'create beta for phabricator test'
+  adding beta
+  $ hg phabsend -r ".^::" --test-vcr "$VCR/phabsend-update-alpha-create-beta.json"
+  D7915 - updated - c44b38f24a45: create alpha for phabricator test \xe2\x82\xac (esc)
+  D7916 - created - 9e6901f21d5b: create beta for phabricator test
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/9e6901f21d5b-1fcd4f0e-phabsend.hg
+  $ unset HGENCODING
+
+The amend won't explode after posting a public commit.  The local tag is left
+behind to identify it.
+
+  $ echo 'public change' > beta
+  $ hg ci -m 'create public change for phabricator testing'
+  $ hg phase --public .
+  $ echo 'draft change' > alpha
+  $ hg ci -m 'create draft change for phabricator testing'
+  $ hg phabsend --amend -r '.^::' --test-vcr "$VCR/phabsend-create-public.json"
+  D7917 - created - 7b4185ab5d16: create public change for phabricator testing
+  D7918 - created - 251c1c333fc6: create draft change for phabricator testing
+  warning: not updating public commit 2:7b4185ab5d16
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/251c1c333fc6-41cb7c3b-phabsend.hg
+  $ hg tags -v
+  tip                                3:3244dc4a3334
+  D7917                              2:7b4185ab5d16 local
+
+  $ hg debugcallconduit user.search --test-vcr "$VCR/phab-conduit.json" <<EOF
+  > {
+  >     "constraints": {
+  >         "isBot": true
+  >     }
+  > }
+  > EOF
+  {
+    "cursor": {
+      "after": null,
+      "before": null,
+      "limit": 100,
+      "order": null
+    },
+    "data": [],
+    "maps": {},
+    "query": {
+      "queryKey": null
+    }
+  }
+
+Template keywords
+  $ hg log -T'{rev} {phabreview|json}\n'
+  3 {"id": "D7918", "url": "https://phab.mercurial-scm.org/D7918"}
+  2 {"id": "D7917", "url": "https://phab.mercurial-scm.org/D7917"}
+  1 {"id": "D7916", "url": "https://phab.mercurial-scm.org/D7916"}
+  0 {"id": "D7915", "url": "https://phab.mercurial-scm.org/D7915"}
+
+  $ hg log -T'{rev} {if(phabreview, "{phabreview.url} {phabreview.id}")}\n'
+  3 https://phab.mercurial-scm.org/D7918 D7918
+  2 https://phab.mercurial-scm.org/D7917 D7917
+  1 https://phab.mercurial-scm.org/D7916 D7916
+  0 https://phab.mercurial-scm.org/D7915 D7915
+
+Commenting when phabsending:
+  $ echo comment > comment
+  $ hg ci --addremove -m "create comment for phabricator test"
+  adding comment
+  $ hg phabsend -r . -m "For default branch" --test-vcr "$VCR/phabsend-comment-created.json"
+  D7919 - created - d5dddca9023d: create comment for phabricator test
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/d5dddca9023d-adf673ba-phabsend.hg
+  $ echo comment2 >> comment
+  $ hg ci --amend
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/f7db812bbe1d-8fcded77-amend.hg
+  $ hg phabsend -r . -m "Address review comments" --test-vcr "$VCR/phabsend-comment-updated.json"
+  D7919 - updated - 1849d7828727: create comment for phabricator test
+
+Phabsending a skipped commit:
+  $ hg phabsend --no-amend -r . --test-vcr "$VCR/phabsend-skipped.json"
+  D7919 - skipped - 1849d7828727: create comment for phabricator test
+
+Phabreading a DREV with a local:commits time as a string:
+  $ hg phabread --test-vcr "$VCR/phabread-str-time.json" D1285
+  # HG changeset patch
+  # User Pulkit Goyal <7895pulkit@gmail.com>
+  # Date 1509404054 -19800
+  # Node ID 44fc1c1f1774a76423b9c732af6938435099bcc5
+  # Parent  8feef8ef8389a3b544e0a74624f1efc3a8d85d35
+  repoview: add a new attribute _visibilityexceptions and related API
+  
+  Currently we don't have a defined way in core to make some hidden revisions
+  visible in filtered repo. Extensions to achieve the purpose of unhiding some
+  hidden commits, wrap repoview.pinnedrevs() function.
+  
+  To make the above task simple and have well defined API, this patch adds a new
+  attribute '_visibilityexceptions' to repoview class which will contains
+  the hidden revs which should be exception.
+  This will allow to set different exceptions for different repoview objects
+  backed by the same unfiltered repo.
+  
+  This patch also adds API to add revs to the attribute set and get them.
+  
+  Thanks to Jun for suggesting the use of repoview class instead of localrepo.
+  
+  Differential Revision: https://phab.mercurial-scm.org/D1285
+  diff --git a/mercurial/repoview.py b/mercurial/repoview.py
+  --- a/mercurial/repoview.py
+  +++ b/mercurial/repoview.py
+  @@ * @@ (glob)
+       subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
+       """
+   
+  +    # hidden revs which should be visible
+  +    _visibilityexceptions = set()
+  +
+       def __init__(self, repo, filtername):
+           object.__setattr__(self, r'_unfilteredrepo', repo)
+           object.__setattr__(self, r'filtername', filtername)
+  @@ -231,6 +234,14 @@
+               return self
+           return self.unfiltered().filtered(name)
+   
+  +    def addvisibilityexceptions(self, revs):
+  +        """adds hidden revs which should be visible to set of exceptions"""
+  +        self._visibilityexceptions.update(revs)
+  +
+  +    def getvisibilityexceptions(self):
+  +        """returns the set of hidden revs which should be visible"""
+  +        return self._visibilityexceptions
+  +
+       # everything access are forwarded to the proxied repo
+       def __getattr__(self, attr):
+           return getattr(self._unfilteredrepo, attr)
+  diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
+  --- a/mercurial/localrepo.py
+  +++ b/mercurial/localrepo.py
+  @@ -570,6 +570,14 @@
+       def close(self):
+           self._writecaches()
+   
+  +    def addvisibilityexceptions(self, exceptions):
+  +        # should be called on a filtered repository
+  +        pass
+  +
+  +    def getvisibilityexceptions(self):
+  +        # should be called on a filtered repository
+  +        return set()
+  +
+       def _loadextensions(self):
+           extensions.loadall(self.ui)
+   
+  
+A bad .arcconfig doesn't error out
+  $ echo 'garbage' > .arcconfig
+  $ hg config phabricator --debug
+  invalid JSON in $TESTTMP/repo/.arcconfig
+  read config from: */.hgrc (glob)
+  $TESTTMP/repo/.hg/hgrc:*: phabricator.url=https://phab.mercurial-scm.org/ (glob)
+  $TESTTMP/repo/.hg/hgrc:*: phabricator.callsign=HG (glob)
+
+The .arcconfig content overrides global config
+  $ cat >> $HGRCPATH << EOF
+  > [phabricator]
+  > url = global
+  > callsign = global
+  > EOF
+  $ cp $TESTDIR/../.arcconfig .
+  $ mv .hg/hgrc .hg/hgrc.bak
+  $ hg config phabricator --debug
+  read config from: */.hgrc (glob)
+  $TESTTMP/repo/.arcconfig: phabricator.callsign=HG
+  $TESTTMP/repo/.arcconfig: phabricator.url=https://phab.mercurial-scm.org/
+
+But it doesn't override local config
+  $ cat >> .hg/hgrc << EOF
+  > [phabricator]
+  > url = local
+  > callsign = local
+  > EOF
+  $ hg config phabricator --debug
+  read config from: */.hgrc (glob)
+  $TESTTMP/repo/.hg/hgrc:*: phabricator.url=local (glob)
+  $TESTTMP/repo/.hg/hgrc:*: phabricator.callsign=local (glob)
+  $ mv .hg/hgrc.bak .hg/hgrc
+
+  $ cd ..
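
The precedence exercised above is: repository .hg/hgrc beats .arcconfig,
which beats the global hgrc. A sketch of the .arcconfig reading under that
assumption (the function and its key handling are illustrative, not the
extension's actual API):

    import json
    import os

    def readarcconfig(repopath):
        # invalid JSON is ignored with a note, as the test above shows
        try:
            with open(os.path.join(repopath, '.arcconfig'), 'rb') as fp:
                cfg = json.load(fp)
        except (IOError, OSError, ValueError):
            return {}
        out = {}
        if 'repository.callsign' in cfg:
            out['callsign'] = cfg['repository.callsign']
        for key in ('phabricator.uri', 'conduit_uri'):
            if key in cfg and 'url' not in out:
                out['url'] = cfg[key]
        return out
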
--- a/tests/test-phases.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-phases.t	Tue Jan 21 13:14:51 2020 -0500
@@ -48,13 +48,58 @@
   1 1 B
   0 1 A
 
-Draft commit are properly created over public one:
+Working directory phase is secret when its parent is secret.
+
+  $ hg phase --force --secret .
+  test-debug-phase: move rev 0: 1 -> 2
+  test-debug-phase: move rev 1: 1 -> 2
+  test-hook-close-phase: 4a2df7238c3b48766b5e22fafbb8a2f506ec8256:  draft -> secret
+  test-hook-close-phase: 27547f69f25460a52fff66ad004e58da7ad3fb56:  draft -> secret
+  $ hg log -r 'wdir()' -T '{phase}\n'
+  secret
+  $ hg log -r 'wdir() and public()' -T '{phase}\n'
+  $ hg log -r 'wdir() and draft()' -T '{phase}\n'
+  $ hg log -r 'wdir() and secret()' -T '{phase}\n'
+  secret
+
+Working directory phase is draft when its parent is draft.
+
+  $ hg phase --draft .
+  test-debug-phase: move rev 1: 2 -> 1
+  test-hook-close-phase: 27547f69f25460a52fff66ad004e58da7ad3fb56:  secret -> draft
+  $ hg log -r 'wdir()' -T '{phase}\n'
+  draft
+  $ hg log -r 'wdir() and public()' -T '{phase}\n'
+  $ hg log -r 'wdir() and draft()' -T '{phase}\n'
+  draft
+  $ hg log -r 'wdir() and secret()' -T '{phase}\n'
+
+Working directory phase is secret when a new commit will be created as secret,
+even if the parent is draft.
+
+  $ hg log -r 'wdir() and secret()' -T '{phase}\n' \
+  > --config phases.new-commit='secret'
+  secret
+
+Working directory phase is draft when its parent is public.
 
   $ hg phase --public .
   test-debug-phase: move rev 0: 1 -> 0
   test-debug-phase: move rev 1: 1 -> 0
   test-hook-close-phase: 4a2df7238c3b48766b5e22fafbb8a2f506ec8256:  draft -> public
   test-hook-close-phase: 27547f69f25460a52fff66ad004e58da7ad3fb56:  draft -> public
+  $ hg log -r 'wdir()' -T '{phase}\n'
+  draft
+  $ hg log -r 'wdir() and public()' -T '{phase}\n'
+  $ hg log -r 'wdir() and draft()' -T '{phase}\n'
+  draft
+  $ hg log -r 'wdir() and secret()' -T '{phase}\n'
+  $ hg log -r 'wdir() and secret()' -T '{phase}\n' \
+  > --config phases.new-commit='secret'
+  secret
+
+Draft commits are properly created over public ones:
+
   $ hg phase
   1: public
   $ hglog
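
The rule the new wdir() assertions establish: the working directory reports
the maximum of its parent's phase and phases.new-commit. A one-function model
of that rule, with numeric phases public=0, draft=1, secret=2 (a model, not
the implementation):

    def wdirphase(parentphase, newcommitphase=1):
        # wdir() is at least as secret as its parent, and at least as
        # secret as a new commit would be
        return max(parentphase, newcommitphase)

So a secret parent yields secret, a draft parent yields draft, and a public
parent yields draft unless phases.new-commit=secret.
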
--- a/tests/test-rebase-collapse.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-rebase-collapse.t	Tue Jan 21 13:14:51 2020 -0500
@@ -288,6 +288,18 @@
 
   $ hg rebase -s F --dest I --collapse # root (F) is not a merge
   rebasing 6:c82b08f646f1 "F" (F)
+  file 'E' was deleted in local [dest] but was modified in other [source].
+  You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
+  What do you want to do? u
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+
+  $ echo F > E
+  $ hg resolve -m
+  (no more unresolved files)
+  continue: hg rebase --continue
+  $ hg rebase -c
+  rebasing 6:c82b08f646f1 "F" (F)
   rebasing 7:a6db7fa104e1 "G" (G)
   rebasing 8:e1d201b72d91 "H" (H tip)
   saved backup bundle to $TESTTMP/external-parent/.hg/strip-backup/c82b08f646f1-f2721fbf-rebase.hg
@@ -592,7 +604,7 @@
   o  0: f447d5abf5ea 'add'
   
   $ hg rebase --collapse -r 1 -d 0
-  abort: can't remove original changesets with unrebased descendants
+  abort: cannot rebase changeset with children
   (use --keep to keep original changesets)
   [255]
 
--- a/tests/test-rebase-inmemory.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-rebase-inmemory.t	Tue Jan 21 13:14:51 2020 -0500
@@ -249,6 +249,10 @@
   rebasing 8:e147e6e3c490 "c/subdir/file.txt" (tip)
   abort: error: 'c/subdir/file.txt' conflicts with file 'c' in 3.
   [255]
+FIXME: shouldn't need this, but when we hit path conflicts in dryrun mode, we
+don't clean up rebasestate.
+  $ hg rebase --abort
+  rebase aborted
   $ hg rebase -r 3 -d . -n
   starting dry-run rebase; repository will not be changed
   rebasing 3:844a7de3e617 "c"
@@ -328,10 +332,10 @@
   
 Make sure it throws error while passing --continue or --abort with --dry-run
   $ hg rebase -s 2 -d 6 -n --continue
-  abort: cannot specify both --dry-run and --continue
+  abort: cannot specify both --continue and --dry-run
   [255]
   $ hg rebase -s 2 -d 6 -n --abort
-  abort: cannot specify both --dry-run and --abort
+  abort: cannot specify both --abort and --dry-run
   [255]
 
 Check dryrun gives correct results when there is no conflict in rebasing
@@ -504,9 +508,8 @@
   $ hg resolve -l
   U e
   $ hg rebase -s 2 -d 7
-  rebasing 2:177f92b77385 "c"
-  abort: outstanding merge conflicts
-  (use 'hg resolve' to resolve)
+  abort: outstanding uncommitted merge
+  (use 'hg commit' or 'hg merge --abort')
   [255]
   $ hg resolve -l
   U e
@@ -545,10 +548,10 @@
   abort: cannot specify both --confirm and --dry-run
   [255]
   $ hg rebase -s 2 -d . --confirm --abort
-  abort: cannot specify both --confirm and --abort
+  abort: cannot specify both --abort and --confirm
   [255]
   $ hg rebase -s 2 -d . --confirm --continue
-  abort: cannot specify both --confirm and --continue
+  abort: cannot specify both --continue and --confirm
   [255]
 
 Test --confirm option when there are no conflicts:
@@ -862,3 +865,58 @@
   warning: conflicts while merging foo! (edit, then use 'hg resolve --mark')
   unresolved conflicts (see hg resolve, then hg rebase --continue)
   [1]
+
+  $ cd $TESTTMP
+
+Test rebasing when we're in the middle of a rebase already
+  $ hg init test_issue6214
+  $ cd test_issue6214
+  $ echo r0 > r0
+  $ hg ci -qAm 'r0'
+  $ echo hi > foo
+  $ hg ci -qAm 'hi from foo'
+  $ hg co -q '.^'
+  $ echo bye > foo
+  $ hg ci -qAm 'bye from foo'
+  $ hg co -q '.^'
+  $ echo unrelated > some_other_file
+  $ hg ci -qAm 'some unrelated changes'
+  $ hg log -G -T'{rev}: {desc}\n{files%"{file}\n"}'
+  @  3: some unrelated changes
+  |  some_other_file
+  | o  2: bye from foo
+  |/   foo
+  | o  1: hi from foo
+  |/   foo
+  o  0: r0
+     r0
+  $ hg rebase -r 2 -d 1 -t:merge3
+  rebasing 2:b4d249fbf8dd "bye from foo"
+  merging foo
+  hit merge conflicts; re-running rebase without in-memory merge
+  rebasing 2:b4d249fbf8dd "bye from foo"
+  merging foo
+  warning: conflicts while merging foo! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase -r 3 -d 1 -t:merge3
+  abort: rebase in progress
+  (use 'hg rebase --continue' or 'hg rebase --abort')
+  [255]
+  $ hg resolve --list
+  U foo
+  $ hg resolve --all --re-merge -t:other
+  (no more unresolved files)
+  continue: hg rebase --continue
+  $ hg rebase --continue
+  rebasing 2:b4d249fbf8dd "bye from foo"
+  saved backup bundle to $TESTTMP/test_issue6214/.hg/strip-backup/b4d249fbf8dd-299ec25c-rebase.hg
+  $ hg log -G -T'{rev}: {desc}\n{files%"{file}\n"}'
+  o  3: bye from foo
+  |  foo
+  | @  2: some unrelated changes
+  | |  some_other_file
+  o |  1: hi from foo
+  |/   foo
+  o  0: r0
+     r0
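
The transcript above shows in-memory rebase falling back to an on-disk rebase
when it hits conflicts. A sketch of that retry shape (all names here are
hypothetical, not rebase's internals):

    class ConflictError(Exception):
        # hypothetical stand-in for the in-memory merge conflict error
        pass

    def rebasewithfallback(ui, dorebase):
        # try the fast in-memory path first, then retry on disk
        try:
            return dorebase(inmemory=True)
        except ConflictError:
            ui.status(
                b'hit merge conflicts; re-running rebase without '
                b'in-memory merge\n'
            )
            return dorebase(inmemory=False)
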
--- a/tests/test-rebase-obsolete.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-rebase-obsolete.t	Tue Jan 21 13:14:51 2020 -0500
@@ -487,7 +487,7 @@
   $ cp -R hidden stabilize
   $ cd stabilize
   $ hg rebase --auto-orphans '0::' -d 10
-  abort: --auto-orphans is incompatible with --dest
+  abort: cannot specify both --auto-orphans and --dest
   [255]
   $ hg rebase --auto-orphans '0::'
   rebasing 9:cf44d2f5a9f4 "D"
@@ -2054,7 +2054,7 @@
   unresolved conflicts (see hg resolve, then hg rebase --continue)
   [1]
   $ hg rebase --stop --dry-run
-  abort: cannot specify both --dry-run and --stop
+  abort: cannot specify both --stop and --dry-run
   [255]
 
   $ hg rebase -s 3 -d 5
@@ -2062,7 +2062,7 @@
   (use 'hg rebase --continue' or 'hg rebase --abort')
   [255]
   $ hg rebase --stop --continue
-  abort: cannot use --stop with --continue
+  abort: cannot specify both --stop and --continue
   [255]
 
 Test --stop moves bookmarks of original revisions to new rebased nodes:
--- a/tests/test-rebase-parameters.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-rebase-parameters.t	Tue Jan 21 13:14:51 2020 -0500
@@ -61,7 +61,7 @@
   [1]
 
   $ hg rebase --continue --abort
-  abort: cannot use --abort with --continue
+  abort: cannot specify both --abort and --continue
   [255]
 
   $ hg rebase --continue --collapse
@@ -69,18 +69,18 @@
   [255]
 
   $ hg rebase --continue --dest 4
-  abort: abort and continue do not allow specifying revisions
+  abort: cannot specify both --continue and --dest
   [255]
 
   $ hg rebase --base 5 --source 4
-  abort: cannot specify both a source and a base
+  abort: cannot specify both --source and --base
   [255]
 
   $ hg rebase --rev 5 --source 4
-  abort: cannot specify both a revision and a source
+  abort: cannot specify both --rev and --source
   [255]
   $ hg rebase --base 5 --rev 4
-  abort: cannot specify both a revision and a base
+  abort: cannot specify both --rev and --base
   [255]
 
   $ hg rebase --base 6
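
Several hunks in this patch converge on a single standardized message for
mutually exclusive flags. A sketch of the shared helper such messages
suggest, modeled on cmdutil.check_at_most_one_arg (the underscore-to-dash
display conversion is an assumption about how opts keys are spelled):

    from mercurial import error, pycompat
    from mercurial.i18n import _

    def check_at_most_one_arg(opts, *args):
        # abort if more than one of the named flags is set; return the
        # one that is set, or None
        def to_display(name):
            return pycompat.sysbytes(name).replace(b'_', b'-')

        previous = None
        for x in args:
            if opts.get(x):
                if previous:
                    raise error.Abort(
                        _(b'cannot specify both --%s and --%s')
                        % (to_display(previous), to_display(x))
                    )
                previous = x
        return previous
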
--- a/tests/test-rebase-scenario-global.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-rebase-scenario-global.t	Tue Jan 21 13:14:51 2020 -0500
@@ -325,14 +325,15 @@
 
   $ hg pull --config phases.publish=True -q -r 6 . # update phase of 6
   $ hg rebase -d 0 -b 6
-  nothing to rebase
-  [1]
+  abort: cannot rebase public changesets
+  (see 'hg help phases' for details)
+  [255]
   $ hg rebase -d 5 -b 6
-  abort: can't rebase public changeset e1c4361dd923
+  abort: cannot rebase public changesets
   (see 'hg help phases' for details)
   [255]
   $ hg rebase -d 5 -r '1 + (6::)'
-  abort: can't rebase public changeset e1c4361dd923
+  abort: cannot rebase public changesets
   (see 'hg help phases' for details)
   [255]
 
@@ -452,7 +453,7 @@
   $ hg clone -q -u . ah ah1
   $ cd ah1
   $ hg rebase -r '2::8' -d 1
-  abort: can't remove original changesets with unrebased descendants
+  abort: cannot rebase changeset with children
   (use --keep to keep original changesets)
   [255]
   $ hg rebase -r '2::8' -d 1 -k
@@ -498,7 +499,7 @@
   $ hg clone -q -u . ah ah2
   $ cd ah2
   $ hg rebase -r '3::8' -d 1
-  abort: can't remove original changesets with unrebased descendants
+  abort: cannot rebase changeset with children
   (use --keep to keep original changesets)
   [255]
   $ hg rebase -r '3::8' -d 1 --keep
@@ -541,7 +542,7 @@
   $ hg clone -q -u . ah ah3
   $ cd ah3
   $ hg rebase -r '3::7' -d 1
-  abort: can't remove original changesets with unrebased descendants
+  abort: cannot rebase changeset with children
   (use --keep to keep original changesets)
   [255]
   $ hg rebase -r '3::7' -d 1 --keep
@@ -581,7 +582,7 @@
   $ hg clone -q -u . ah ah4
   $ cd ah4
   $ hg rebase -r '3::(7+5)' -d 1
-  abort: can't remove original changesets with unrebased descendants
+  abort: cannot rebase changeset with children
   (use --keep to keep original changesets)
   [255]
   $ hg rebase -r '3::(7+5)' -d 1 --keep
--- a/tests/test-releasenotes-formatting.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-releasenotes-formatting.t	Tue Jan 21 13:14:51 2020 -0500
@@ -428,11 +428,11 @@
   $ hg init relnotes-raise-error
   $ cd relnotes-raise-error
   $ hg releasenotes -r . -l
-  abort: cannot use both '--list' and '--rev'
+  abort: cannot specify both --list and --rev
   [255]
 
   $ hg releasenotes -l -c
-  abort: cannot use both '--list' and '--check'
+  abort: cannot specify both --list and --check
   [255]
 
 Display release notes for specified revs if no file is mentioned
--- a/tests/test-remotefilelog-datapack.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-remotefilelog-datapack.py	Tue Jan 21 13:14:51 2020 -0500
@@ -237,7 +237,7 @@
             f.write(raw)
 
         try:
-            pack = self.datapackreader(pack.path)
+            self.datapackreader(pack.path)
             self.assertTrue(False, "bad version number should have thrown")
         except RuntimeError:
             pass
--- a/tests/test-remotefilelog-histpack.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-remotefilelog-histpack.py	Tue Jan 21 13:14:51 2020 -0500
@@ -252,7 +252,7 @@
             f.write(raw)
 
         try:
-            pack = historypack.historypack(pack.path)
+            historypack.historypack(pack.path)
             self.assertTrue(False, "bad version number should have thrown")
         except RuntimeError:
             pass
--- a/tests/test-remotefilelog-prefetch.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-remotefilelog-prefetch.t	Tue Jan 21 13:14:51 2020 -0500
@@ -236,3 +236,36 @@
   $ hg revert -a -r 1 || true
   3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over * (glob)
   abort: z2@109c3a557a73: not found in manifest! (?)
+
+# warning when we have excess remotefilelog fetching
+
+  $ cat > repeated_fetch.py << EOF
+  > import binascii
+  > from mercurial import extensions, registrar
+  > cmdtable = {}
+  > command = registrar.command(cmdtable)
+  > @command(b'repeated-fetch', [], b'', inferrepo=True)
+  > def repeated_fetch(ui, repo, *args, **opts):
+  >     for i in range(20):
+  >         try:
+  >             hexid = (b'%02x' % (i + 1)) * 20
+  >             repo.fileservice.prefetch([(b'somefile.txt', hexid)])
+  >         except Exception:
+  >             pass
+  > EOF
+
+We should only output to the user once. We're ignoring most of the output
+because we're not actually fetching anything real here: all the hashes are
+bogus, so the output is just errors and a final summary of all the misses.
+  $ hg --config extensions.repeated_fetch=repeated_fetch.py \
+  >    --config remotefilelog.fetchwarning="fetch warning!" \
+  >    --config extensions.blackbox= \
+  >    repeated-fetch 2>&1 | grep 'fetch warning'
+  fetch warning!
+
+We should output to blackbox three times, with a stack trace on each (though
+that isn't tested here).
+  $ grep 'excess remotefilelog fetching' .hg/blackbox.log
+  .* excess remotefilelog fetching: (re)
+  .* excess remotefilelog fetching: (re)
+  .* excess remotefilelog fetching: (re)
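
The behavior checked above (warn the user once, log every over-threshold
fetch to blackbox) is a simple throttle. A sketch under that assumption; the
class, its threshold, and the wiring are hypothetical:

    class fetchwarner(object):
        # hypothetical: log every excess fetch, warn the user only once
        def __init__(self, ui, threshold):
            self.ui = ui
            self.threshold = threshold
            self.fetches = 0
            self.warned = False

        def record(self, count):
            self.fetches += count
            if self.fetches < self.threshold:
                return
            self.ui.log(b'remotefilelog', b'excess remotefilelog fetching\n')
            if not self.warned:
                warning = self.ui.config(b'remotefilelog', b'fetchwarning')
                if warning:
                    self.ui.warn(warning + b'\n')
                self.warned = True
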
--- a/tests/test-repair-strip.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-repair-strip.t	Tue Jan 21 13:14:51 2020 -0500
@@ -51,7 +51,7 @@
   transaction abort!
   failed to truncate data/b.i
   rollback failed - please run hg recover
-  (failure reason: [Errno 13] Permission denied .hg/store/data/b.i')
+  (failure reason: [Errno *] Permission denied .hg/store/data/b.i') (glob)
   strip failed, backup bundle
   abort: Permission denied .hg/store/data/b.i'
   % after update 0, strip 2
@@ -105,7 +105,7 @@
   transaction abort!
   failed to truncate 00manifest.i
   rollback failed - please run hg recover
-  (failure reason: [Errno 13] Permission denied .hg/store/00manifest.i')
+  (failure reason: [Errno *] Permission denied .hg/store/00manifest.i') (glob)
   strip failed, backup bundle
   abort: Permission denied .hg/store/00manifest.i'
   % after update 0, strip 2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-repo-filters-tiptoe.t	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,146 @@
+===================================
+Test repository filtering avoidance
+===================================
+
+This test file is a bit special as it does not check a feature, but a
+performance-related internal code path.
+
+Right now, filtering a repository comes with a cost that might be significant.
+Until this gets better, there are various operations that try hard not to
+trigger a filtering computation. This test file makes sure we don't
+reintroduce code that triggers the filtering for these operations:
+
+Setup
+-----
+  $ hg init test-repo
+  $ cd test-repo
+  $ echo "some line" > z
+  $ echo a > a
+  $ hg commit -Am a
+  adding a
+  adding z
+  $ echo "in a" >> z
+  $ echo b > b
+  $ hg commit -Am b
+  adding b
+  $ echo "file" >> z
+  $ echo c > c
+  $ hg commit -Am c
+  adding c
+  $ hg rm a
+  $ echo c1 > c
+  $ hg add c
+  c already tracked!
+  $ echo d > d
+  $ hg add d
+  $ rm b
+
+  $ cat << EOF >> $HGRCPATH
+  > [devel]
+  > debug.repo-filters = yes
+  > [ui]
+  > debug = yes
+  > EOF
+
+
+tests
+-----
+
+Getting the node of `null`
+
+  $ hg log -r null -T "{node}\n"
+  0000000000000000000000000000000000000000
+
+Getting basic changeset information about `null`
+
+  $ hg log -r null -T "{node}\n{date}\n"
+  0000000000000000000000000000000000000000
+  0.00
+
+Getting status of null
+
+  $ hg status --change null
+
+Getting status of working copy
+
+  $ hg status
+  M c
+  A d
+  R a
+  ! b
+
+Getting data about the working copy parent
+
+  $ hg log -r '.' -T "{node}\n{date}\n"
+  c2932ca7786be30b67154d541a8764fae5532261
+  0.00
+
+Getting working copy diff
+
+  $ hg diff
+  diff -r c2932ca7786be30b67154d541a8764fae5532261 a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,1 +0,0 @@
+  -a
+  diff -r c2932ca7786be30b67154d541a8764fae5532261 c
+  --- a/c	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,1 +1,1 @@
+  -c
+  +c1
+  diff -r c2932ca7786be30b67154d541a8764fae5532261 d
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/d	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +d
+  $ hg diff --change .
+  diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +c
+  diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
+  --- a/z	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/z	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,2 +1,3 @@
+   some line
+   in a
+  +file
+
+exporting the current changeset
+
+  $ hg export
+  exporting patch:
+  # HG changeset patch
+  # User test
+  # Date 0 0
+  #      Thu Jan 01 00:00:00 1970 +0000
+  # Node ID c2932ca7786be30b67154d541a8764fae5532261
+  # Parent  05293e5dd8d1ae4f84a8520a11c6f97cad26deca
+  c
+  
+  diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +c
+  diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
+  --- a/z	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/z	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,2 +1,3 @@
+   some line
+   in a
+  +file
+
+using annotate
+
+- file with a single change
+
+  $ hg annotate a
+  0: a
+
+- file with multiple changes
+
+  $ hg annotate z
+  0: some line
+  1: in a
+  2: file
--- a/tests/test-rust-ancestor.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-rust-ancestor.py	Tue Jan 21 13:14:51 2020 -0500
@@ -7,6 +7,8 @@
     node,
 )
 
+from mercurial.testing import revlog as revlogtesting
+
 try:
     from mercurial import rustext
 
@@ -27,34 +29,18 @@
 except ImportError:
     cparsers = None
 
-# picked from test-parse-index2, copied rather than imported
-# so that it stays stable even if test-parse-index2 changes or disappears.
-data_non_inlined = (
-    b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01D\x19'
-    b'\x00\x07e\x12\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff'
-    b'\xff\xff\xff\xff\xd1\xf4\xbb\xb0\xbe\xfc\x13\xbd\x8c\xd3\x9d'
-    b'\x0f\xcd\xd9;\x8c\x07\x8cJ/\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    b'\x00\x00\x00\x00\x00\x00\x01D\x19\x00\x00\x00\x00\x00\xdf\x00'
-    b'\x00\x01q\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff'
-    b'\xff\xff\xff\xc1\x12\xb9\x04\x96\xa4Z1t\x91\xdfsJ\x90\xf0\x9bh'
-    b'\x07l&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-    b'\x00\x01D\xf8\x00\x00\x00\x00\x01\x1b\x00\x00\x01\xb8\x00\x00'
-    b'\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\x02\n'
-    b'\x0e\xc6&\xa1\x92\xae6\x0b\x02i\xfe-\xe5\xbao\x05\xd1\xe7\x00'
-    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01F'
-    b'\x13\x00\x00\x00\x00\x01\xec\x00\x00\x03\x06\x00\x00\x00\x01'
-    b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
-    b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
-    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-)
-
 
 @unittest.skipIf(
-    rustext is None or cparsers is None,
-    "rustext or the C Extension parsers module "
-    "ancestor relies on is not available",
+    rustext is None,
+    'The Rust version of the "ancestor" module is not available. It is needed'
+    ' for this test.',
 )
-class rustancestorstest(unittest.TestCase):
+@unittest.skipIf(
+    rustext is None,
+    'The Rust or C version of the "parsers" module, which the "ancestor" module'
+    ' relies on, is not available.',
+)
+class rustancestorstest(revlogtesting.RevlogBasedTestBase):
     """Test the correctness of binding to Rust code.
 
     This test is merely for the binding to Rust itself: extraction of
@@ -67,9 +53,6 @@
     Algorithmic correctness is asserted by the Rust unit tests.
     """
 
-    def parseindex(self):
-        return cparsers.parse_index2(data_non_inlined, False)[0]
-
     def testiteratorrevlist(self):
         idx = self.parseindex()
         # checking test assumption about the index binary data:
@@ -150,7 +133,9 @@
 
     def testgrapherror(self):
         data = (
-            data_non_inlined[: 64 + 27] + b'\xf2' + data_non_inlined[64 + 28 :]
+            revlogtesting.data_non_inlined[: 64 + 27]
+            + b'\xf2'
+            + revlogtesting.data_non_inlined[64 + 28 :]
         )
         idx = cparsers.parse_index2(data, False)[0]
         with self.assertRaises(rustext.GraphError) as arc:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rust-revlog.py	Tue Jan 21 13:14:51 2020 -0500
@@ -0,0 +1,60 @@
+from __future__ import absolute_import
+import unittest
+
+try:
+    from mercurial import rustext
+
+    rustext.__name__  # trigger immediate actual import
+except ImportError:
+    rustext = None
+else:
+    from mercurial.rustext import revlog
+
+    # this would fail already without appropriate ancestor.__package__
+    from mercurial.rustext.ancestor import LazyAncestors
+
+from mercurial.testing import revlog as revlogtesting
+
+
+@unittest.skipIf(
+    rustext is None, "rustext module revlog relies on is not available",
+)
+class RustRevlogIndexTest(revlogtesting.RevlogBasedTestBase):
+    def test_heads(self):
+        idx = self.parseindex()
+        rustidx = revlog.MixedIndex(idx)
+        self.assertEqual(rustidx.headrevs(), idx.headrevs())
+
+    def test_get_cindex(self):
+        # drop me once we no longer need the method for shortest node
+        idx = self.parseindex()
+        rustidx = revlog.MixedIndex(idx)
+        cidx = rustidx.get_cindex()
+        self.assertTrue(idx is cidx)
+
+    def test_len(self):
+        idx = self.parseindex()
+        rustidx = revlog.MixedIndex(idx)
+        self.assertEqual(len(rustidx), len(idx))
+
+    def test_ancestors(self):
+        idx = self.parseindex()
+        rustidx = revlog.MixedIndex(idx)
+        lazy = LazyAncestors(rustidx, [3], 0, True)
+        # we have two more references to the index:
+        # - in its inner iterator for __contains__ and __bool__
+        # - in the LazyAncestors instance itself (to spawn new iterators)
+        self.assertTrue(2 in lazy)
+        self.assertTrue(bool(lazy))
+        self.assertEqual(list(lazy), [3, 2, 1, 0])
+        # a second time to validate that we spawn new iterators
+        self.assertEqual(list(lazy), [3, 2, 1, 0])
+
+        # let's check bool for an empty one
+        self.assertFalse(LazyAncestors(idx, [0], 0, False))
+
+
+if __name__ == '__main__':
+    import silenttestrunner
+
+    silenttestrunner.main(__name__)
--- a/tests/test-shelve.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-shelve.t	Tue Jan 21 13:14:51 2020 -0500
@@ -951,6 +951,16 @@
   +++ b/jungle
   @@ -0,0 +1,1 @@
   +babar
+
+Test shelve --delete
+
+  $ hg shelve --list
+  default         (*s ago)    changes to: create conflict (glob)
+  $ hg shelve --delete doesnotexist
+  abort: shelved change 'doesnotexist' not found
+  [255]
+  $ hg shelve --delete default
+
   $ cd ..
 
 Test visibility of in-memory changes inside transaction to external hook
--- a/tests/test-split.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-split.t	Tue Jan 21 13:14:51 2020 -0500
@@ -77,7 +77,7 @@
 
   $ hg phase --public -r 'all()'
   $ hg split .
-  abort: cannot split public changeset
+  abort: cannot split public changesets
   (see 'hg help phases' for details)
   [255]
 
@@ -466,7 +466,7 @@
   $ cd $TESTTMP/d
 #if obsstore-off
   $ runsplit -r 1 --no-rebase
-  abort: cannot split changeset with children without rebase
+  abort: cannot split changeset with children
   [255]
 #else
   $ runsplit -r 1 --no-rebase >/dev/null
@@ -517,7 +517,7 @@
   $ eval `hg tags -T '{tag}={node}\n'`
   $ rm .hg/localtags
   $ hg split $B --config experimental.evolution=createmarkers
-  abort: split would leave orphaned changesets behind
+  abort: cannot split changeset with children
   [255]
   $ cat > $TESTTMP/messages <<EOF
   > Split B
--- a/tests/test-status.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-status.t	Tue Jan 21 13:14:51 2020 -0500
@@ -254,35 +254,43 @@
   $ hg status -A -Tjson
   [
    {
+    "itemtype": "file",
     "path": "added",
     "status": "A"
    },
    {
+    "itemtype": "file",
     "path": "copied",
     "source": "modified",
     "status": "A"
    },
    {
+    "itemtype": "file",
     "path": "removed",
     "status": "R"
    },
    {
+    "itemtype": "file",
     "path": "deleted",
     "status": "!"
    },
    {
+    "itemtype": "file",
     "path": "unknown",
     "status": "?"
    },
    {
+    "itemtype": "file",
     "path": "ignored",
     "status": "I"
    },
    {
+    "itemtype": "file",
     "path": ".hgignore",
     "status": "C"
    },
    {
+    "itemtype": "file",
     "path": "modified",
     "status": "C"
    }
@@ -558,6 +566,7 @@
   $ hg status --config ui.formatdebug=True --rev 1 1
   status = [
       {
+          'itemtype': 'file',
           'path': '1/2/3/4/5/b.txt',
           'status': 'R'
       },
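
Every JSON item now carries an "itemtype" discriminator, so consumers can
skip entry kinds they don't understand (the test-update-branches.t hunks
below add entries carrying "unresolved" instead of "status" under the same
scheme). A sketch of a tolerant consumer:

    import json
    import subprocess

    out = subprocess.check_output(['hg', 'status', '-A', '-Tjson'])
    for item in json.loads(out):
        # handle plain file entries only; other itemtypes may appear later
        if item.get('itemtype') != 'file':
            continue
        print(item.get('status', '?'), item['path'])
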
--- a/tests/test-subrepo-svn.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-subrepo-svn.t	Tue Jan 21 13:14:51 2020 -0500
@@ -87,10 +87,12 @@
 
   $ hg debugsub
   path s
-   source   file:/*/$TESTTMP/svn-repo/src (glob)
+   source   file:/*/svn-repo/src (glob) (windows !)
+   source   file:/*/$TESTTMP/svn-repo/src (glob) (no-windows !)
    revision 2
   path subdir/s
-   source   file:/*/$TESTTMP/svn-repo/src (glob)
+   source   file:/*/svn-repo/src (glob) (windows !)
+   source   file:/*/$TESTTMP/svn-repo/src (glob) (no-windows !)
    revision 2
 
 change file in svn and hg, commit
@@ -113,10 +115,12 @@
   At revision 3.
   $ hg debugsub
   path s
-   source   file:/*/$TESTTMP/svn-repo/src (glob)
+   source   file:/*/svn-repo/src (glob) (windows !)
+   source   file:/*/$TESTTMP/svn-repo/src (glob) (no-windows !)
    revision 3
   path subdir/s
-   source   file:/*/$TESTTMP/svn-repo/src (glob)
+   source   file:/*/svn-repo/src (glob) (windows !)
+   source   file:/*/$TESTTMP/svn-repo/src (glob) (no-windows !)
    revision 2
 
 missing svn file, commit should fail
@@ -235,10 +239,12 @@
 
   $ hg debugsub
   path s
-   source   file:/*/$TESTTMP/svn-repo/src (glob)
+   source   file:/*/svn-repo/src (glob) (windows !)
+   source   file:/*/$TESTTMP/svn-repo/src (glob) (no-windows !)
    revision 3
   path subdir/s
-   source   file:/*/$TESTTMP/svn-repo/src (glob)
+   source   file:/*/svn-repo/src (glob) (windows !)
+   source   file:/*/$TESTTMP/svn-repo/src (glob) (no-windows !)
    revision 2
 
 verify subrepo is contained within the repo directory
@@ -574,8 +580,8 @@
 Test forgetting files, which is not implemented in the svn subrepo and used
 to raise a traceback
 
-  $ hg forget 'notafile*'
-  notafile*: $ENOENT$
+  $ hg forget 'notafile'
+  notafile: $ENOENT$
   [1]
 
 Test a subrepo referencing a just moved svn path. Last commit rev will
--- a/tests/test-template-functions.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-template-functions.t	Tue Jan 21 13:14:51 2020 -0500
@@ -1504,6 +1504,21 @@
   
   >> other 3
 
+Test indent with empty first line
+
+  $ hg version -T "{indent('', '>> ')}\n"
+  
+
+  $ hg version -T "{indent('
+  > second', '>> ')}\n"
+  
+  >> second
+
+  $ hg version -T "{indent('
+  > second', '>> ', ' > ')}\n"
+  
+  >> second
+
 Test with non-strings like dates
 
   $ hg log -T "{indent(date, '   ')}\n" -r 2:3 -R a
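
The new indent() cases above exercise two rules: empty lines are never
indented, and an optional third argument supplies the prefix for the first
line only. A model of those rules (a sketch, not the template engine's code):

    def indent(text, prefix, firstprefix=None):
        # the first line may take its own prefix; empty lines stay empty
        if firstprefix is None:
            firstprefix = prefix
        out = []
        for i, line in enumerate(text.split('\n')):
            pre = firstprefix if i == 0 else prefix
            out.append(pre + line if line else line)
        return '\n'.join(out)
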
--- a/tests/test-transplant.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-transplant.t	Tue Jan 21 13:14:51 2020 -0500
@@ -18,10 +18,10 @@
   abort: no source URL, branch revision, or revision list provided
   [255]
   $ hg transplant --continue --all
-  abort: --continue is incompatible with --branch, --all and --merge
+  abort: cannot specify both --continue and --all
   [255]
   $ hg transplant --stop --all
-  abort: --stop is incompatible with --branch, --all and --merge
+  abort: cannot specify both --stop and --all
   [255]
   $ hg transplant --all tip
   abort: --all requires a branch revision
--- a/tests/test-trusted.py.out	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-trusted.py.out	Tue Jan 21 13:14:51 2020 -0500
@@ -174,7 +174,7 @@
 # parse error
 # different user, different group
 not trusting file .hg/hgrc from untrusted user abc, group def
-ParseError('foo', '.hg/hgrc:1')
+ignored: ('foo', '.hg/hgrc:1')
 # same user, same group
 ParseError('foo', '.hg/hgrc:1')
 
--- a/tests/test-unamend.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-unamend.t	Tue Jan 21 13:14:51 2020 -0500
@@ -346,6 +346,14 @@
   $ hg mv c wat
   $ hg unamend
 
+  $ hg verify -v
+  repository uses revlog format 1
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 28 changesets with 16 changes to 11 files
+
 Retained copies in new predecessor commit
 
   $ hg exp --git
--- a/tests/test-uncommit.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-uncommit.t	Tue Jan 21 13:14:51 2020 -0500
@@ -554,10 +554,10 @@
 
   $ hg rollback -q --config ui.rollback=True
   $ hg uncommit -U --user 'user'
-  abort: --user and --currentuser are mutually exclusive
+  abort: cannot specify both --user and --currentuser
   [255]
   $ hg uncommit -D --date today
-  abort: --date and --currentdate are mutually exclusive
+  abort: cannot specify both --date and --currentdate
   [255]
 
 `uncommit <dir>` and `cd <dir> && uncommit .` behave the same...
--- a/tests/test-update-branches.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-update-branches.t	Tue Jan 21 13:14:51 2020 -0500
@@ -1,3 +1,8 @@
+  $ cat >> $HGRCPATH <<EOF
+  > [commands]
+  > status.verbose=1
+  > EOF
+
 # Construct the following history tree:
 #
 # @  5:e1bb631146ca  b1
@@ -247,6 +252,12 @@
   $ hg st
   M a
   ? a.orig
+  # Unresolved merge conflicts:
+  # 
+  #     a
+  # 
+  # To mark files as resolved:  hg resolve --mark FILE
+  
   $ cat a
   <<<<<<< working copy: 6efa171f091b - test: 3
   three
@@ -308,6 +319,16 @@
   use 'hg resolve' to retry unresolved file merges
   [1]
   $ rm a.orig
+  $ hg status
+  M a
+  # Unresolved merge conflicts:
+  # 
+  #     a
+  # 
+  # To mark files as resolved:  hg resolve --mark FILE
+  
+  $ hg resolve -l
+  U a
 
 Change/delete conflict is not allowed
   $ hg up -qC 3
@@ -536,13 +557,71 @@
   updated to hidden changeset 6efa171f091b
   (hidden revision '6efa171f091b' was rewritten as: d047485b3896)
   [1]
+
+Test that statuses are reported properly before and after merge resolution.
+  $ rm a.orig
+  $ hg resolve -l
+  U a
+  $ hg status
+  M a
+  M foo
+  # Unresolved merge conflicts:
+  # 
+  #     a
+  # 
+  # To mark files as resolved:  hg resolve --mark FILE
+  
+
   $ hg revert -r . a
+
+  $ rm a.orig
+  $ hg resolve -l
+  U a
+  $ hg status
+  M foo
+  # Unresolved merge conflicts:
+  # 
+  #     a
+  # 
+  # To mark files as resolved:  hg resolve --mark FILE
+  
+  $ hg status -Tjson
+  [
+   {
+    "itemtype": "file",
+    "path": "foo",
+    "status": "M"
+   },
+   {
+    "itemtype": "file",
+    "path": "a",
+    "unresolved": true
+   }
+  ]
+
   $ hg resolve -m
   (no more unresolved files)
 
+  $ hg resolve -l
+  R a
+  $ hg status
+  M foo
+  # No unresolved merge conflicts.
+  
+  $ hg status -Tjson
+  [
+   {
+    "itemtype": "file",
+    "path": "foo",
+    "status": "M"
+   }
+  ]
+
 Test that 4 is detected as the no-argument destination from 3 and also moves
 the bookmark with it
   $ hg up --quiet 0          # we should be able to update to 3 directly
+  $ hg status
+  M foo
   $ hg up --quiet --hidden 3 # but not implemented yet.
   updated to hidden changeset 6efa171f091b
   (hidden revision '6efa171f091b' was rewritten as: d047485b3896)
--- a/tests/test-worker.t	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-worker.t	Tue Jan 21 13:14:51 2020 -0500
@@ -2,6 +2,7 @@
 
   $ cat > t.py <<EOF
   > from __future__ import absolute_import, print_function
+  > import sys
   > import time
   > from mercurial import (
   >     error,
@@ -9,6 +10,7 @@
   >     ui as uimod,
   >     worker,
   > )
+  > sys.unraisablehook = lambda x: None
   > def abort(ui, args):
   >     if args[0] == 0:
   >         # by first worker for test stability
@@ -101,7 +103,9 @@
   > from __future__ import absolute_import
   > import atexit
   > import os
+  > import sys
   > import time
+  > sys.unraisablehook = lambda x: None
   > oldfork = os.fork
   > count = 0
   > parentpid = os.getpid()
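
The sys.unraisablehook assignments above silence the reports Python 3.8+
emits for exceptions that cannot propagate (for example, ones raised in
__del__), which would otherwise pollute the expected test output. A minimal
standalone sketch of the mechanism; the names below are illustrative:

    import sys

    # Python 3.8+ routes exceptions that cannot propagate through
    # sys.unraisablehook instead of printing them directly. The hook
    # receives an object with exc_type, exc_value, exc_traceback,
    # err_msg, and object attributes.
    sys.unraisablehook = lambda unraisable: None  # swallow the report

    class Noisy:
        def __del__(self):
            # Would normally print "Exception ignored in: ..."
            raise RuntimeError('boom')

    Noisy()  # nothing is printed with the hook installed
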
--- a/tests/test-wsgirequest.py	Thu Jan 09 14:19:20 2020 -0500
+++ b/tests/test-wsgirequest.py	Tue Jan 21 13:14:51 2020 -0500
@@ -6,17 +6,17 @@
 from mercurial import error
 
 DEFAULT_ENV = {
-    r'REQUEST_METHOD': r'GET',
-    r'SERVER_NAME': r'testserver',
-    r'SERVER_PORT': r'80',
-    r'SERVER_PROTOCOL': r'http',
-    r'wsgi.version': (1, 0),
-    r'wsgi.url_scheme': r'http',
-    r'wsgi.input': None,
-    r'wsgi.errors': None,
-    r'wsgi.multithread': False,
-    r'wsgi.multiprocess': True,
-    r'wsgi.run_once': False,
+    'REQUEST_METHOD': 'GET',
+    'SERVER_NAME': 'testserver',
+    'SERVER_PORT': '80',
+    'SERVER_PROTOCOL': 'http',
+    'wsgi.version': (1, 0),
+    'wsgi.url_scheme': 'http',
+    'wsgi.input': None,
+    'wsgi.errors': None,
+    'wsgi.multithread': False,
+    'wsgi.multiprocess': True,
+    'wsgi.run_once': False,
 }
 
 
@@ -49,7 +49,7 @@
         self.assertEqual(len(r.headers), 0)
 
     def testcustomport(self):
-        r = parse(DEFAULT_ENV, extra={r'SERVER_PORT': r'8000',})
+        r = parse(DEFAULT_ENV, extra={'SERVER_PORT': '8000',})
 
         self.assertEqual(r.url, b'http://testserver:8000')
         self.assertEqual(r.baseurl, b'http://testserver:8000')
@@ -58,7 +58,7 @@
 
         r = parse(
             DEFAULT_ENV,
-            extra={r'SERVER_PORT': r'4000', r'wsgi.url_scheme': r'https',},
+            extra={'SERVER_PORT': '4000', 'wsgi.url_scheme': 'https',},
         )
 
         self.assertEqual(r.url, b'https://testserver:4000')
@@ -67,7 +67,7 @@
         self.assertEqual(r.advertisedbaseurl, b'https://testserver:4000')
 
     def testhttphost(self):
-        r = parse(DEFAULT_ENV, extra={r'HTTP_HOST': r'altserver',})
+        r = parse(DEFAULT_ENV, extra={'HTTP_HOST': 'altserver',})
 
         self.assertEqual(r.url, b'http://altserver')
         self.assertEqual(r.baseurl, b'http://altserver')
@@ -75,7 +75,7 @@
         self.assertEqual(r.advertisedbaseurl, b'http://testserver')
 
     def testscriptname(self):
-        r = parse(DEFAULT_ENV, extra={r'SCRIPT_NAME': r'',})
+        r = parse(DEFAULT_ENV, extra={'SCRIPT_NAME': '',})
 
         self.assertEqual(r.url, b'http://testserver')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -85,7 +85,7 @@
         self.assertEqual(r.dispatchparts, [])
         self.assertIsNone(r.dispatchpath)
 
-        r = parse(DEFAULT_ENV, extra={r'SCRIPT_NAME': r'/script',})
+        r = parse(DEFAULT_ENV, extra={'SCRIPT_NAME': '/script',})
 
         self.assertEqual(r.url, b'http://testserver/script')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -95,7 +95,7 @@
         self.assertEqual(r.dispatchparts, [])
         self.assertIsNone(r.dispatchpath)
 
-        r = parse(DEFAULT_ENV, extra={r'SCRIPT_NAME': r'/multiple words',})
+        r = parse(DEFAULT_ENV, extra={'SCRIPT_NAME': '/multiple words',})
 
         self.assertEqual(r.url, b'http://testserver/multiple%20words')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -106,7 +106,7 @@
         self.assertIsNone(r.dispatchpath)
 
     def testpathinfo(self):
-        r = parse(DEFAULT_ENV, extra={r'PATH_INFO': r'',})
+        r = parse(DEFAULT_ENV, extra={'PATH_INFO': '',})
 
         self.assertEqual(r.url, b'http://testserver')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -116,7 +116,7 @@
         self.assertEqual(r.dispatchparts, [])
         self.assertEqual(r.dispatchpath, b'')
 
-        r = parse(DEFAULT_ENV, extra={r'PATH_INFO': r'/pathinfo',})
+        r = parse(DEFAULT_ENV, extra={'PATH_INFO': '/pathinfo',})
 
         self.assertEqual(r.url, b'http://testserver/pathinfo')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -126,7 +126,7 @@
         self.assertEqual(r.dispatchparts, [b'pathinfo'])
         self.assertEqual(r.dispatchpath, b'pathinfo')
 
-        r = parse(DEFAULT_ENV, extra={r'PATH_INFO': r'/one/two/',})
+        r = parse(DEFAULT_ENV, extra={'PATH_INFO': '/one/two/',})
 
         self.assertEqual(r.url, b'http://testserver/one/two/')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -139,7 +139,7 @@
     def testscriptandpathinfo(self):
         r = parse(
             DEFAULT_ENV,
-            extra={r'SCRIPT_NAME': r'/script', r'PATH_INFO': r'/pathinfo',},
+            extra={'SCRIPT_NAME': '/script', 'PATH_INFO': '/pathinfo',},
         )
 
         self.assertEqual(r.url, b'http://testserver/script/pathinfo')
@@ -153,8 +153,8 @@
         r = parse(
             DEFAULT_ENV,
             extra={
-                r'SCRIPT_NAME': r'/script1/script2',
-                r'PATH_INFO': r'/path1/path2',
+                'SCRIPT_NAME': '/script1/script2',
+                'PATH_INFO': '/path1/path2',
             },
         )
 
@@ -173,9 +173,9 @@
         r = parse(
             DEFAULT_ENV,
             extra={
-                r'HTTP_HOST': r'hostserver',
-                r'SCRIPT_NAME': r'/script',
-                r'PATH_INFO': r'/pathinfo',
+                'HTTP_HOST': 'hostserver',
+                'SCRIPT_NAME': '/script',
+                'PATH_INFO': '/pathinfo',
             },
         )
 
@@ -208,7 +208,7 @@
             parse(
                 DEFAULT_ENV,
                 reponame=b'repo',
-                extra={r'PATH_INFO': r'/pathinfo',},
+                extra={'PATH_INFO': '/pathinfo',},
             )
 
         with self.assertRaisesRegex(
@@ -217,13 +217,13 @@
             parse(
                 DEFAULT_ENV,
                 reponame=b'repo',
-                extra={r'PATH_INFO': r'/repoextra/path',},
+                extra={'PATH_INFO': '/repoextra/path',},
             )
 
         r = parse(
             DEFAULT_ENV,
             reponame=b'repo',
-            extra={r'PATH_INFO': r'/repo/path1/path2',},
+            extra={'PATH_INFO': '/repo/path1/path2',},
         )
 
         self.assertEqual(r.url, b'http://testserver/repo/path1/path2')
@@ -238,7 +238,7 @@
         r = parse(
             DEFAULT_ENV,
             reponame=b'prefix/repo',
-            extra={r'PATH_INFO': r'/prefix/repo/path1/path2',},
+            extra={'PATH_INFO': '/prefix/repo/path1/path2',},
         )
 
         self.assertEqual(r.url, b'http://testserver/prefix/repo/path1/path2')
@@ -307,7 +307,7 @@
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver',
-            extra={r'PATH_INFO': r'/path1/path2',},
+            extra={'PATH_INFO': '/path1/path2',},
         )
         self.assertEqual(r.url, b'http://testserver/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -347,7 +347,7 @@
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver/altpath',
-            extra={r'PATH_INFO': r'/path1/path2',},
+            extra={'PATH_INFO': '/path1/path2',},
         )
         self.assertEqual(r.url, b'http://testserver/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -365,7 +365,7 @@
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver/altpath/',
-            extra={r'PATH_INFO': r'/path1/path2',},
+            extra={'PATH_INFO': '/path1/path2',},
         )
         self.assertEqual(r.url, b'http://testserver/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -383,7 +383,7 @@
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver',
-            extra={r'SCRIPT_NAME': r'/script', r'PATH_INFO': r'/path1/path2',},
+            extra={'SCRIPT_NAME': '/script', 'PATH_INFO': '/path1/path2',},
         )
         self.assertEqual(r.url, b'http://testserver/script/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -399,7 +399,7 @@
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver/altroot',
-            extra={r'SCRIPT_NAME': r'/script', r'PATH_INFO': r'/path1/path2',},
+            extra={'SCRIPT_NAME': '/script', 'PATH_INFO': '/path1/path2',},
         )
         self.assertEqual(r.url, b'http://testserver/script/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
@@ -418,10 +418,7 @@
             DEFAULT_ENV,
             reponame=b'repo',
             altbaseurl=b'http://altserver/altroot',
-            extra={
-                r'SCRIPT_NAME': r'/script',
-                r'PATH_INFO': r'/repo/path1/path2',
-            },
+            extra={'SCRIPT_NAME': '/script', 'PATH_INFO': '/repo/path1/path2',},
         )
 
         self.assertEqual(r.url, b'http://testserver/script/repo/path1/path2')
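
Dropping the r'' prefixes above is safe: a raw-string prefix only changes
how backslash escapes are read, and none of these literals contain a
backslash, so the plain spelling is identical. The prefixes existed only as
markers for the Python 2 source transformer that byteified bare string
literals, which is no longer applied. A quick illustration:

    # Identical values when no backslash is present:
    assert r'GET' == 'GET'
    assert r'/path1/path2' == '/path1/path2'
    # The prefix matters only with backslashes:
    assert r'\n' != '\n'
    assert r'\n' == '\\n'
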